Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * An async IO implementation for Linux | |
3 | * Written by Benjamin LaHaise <[email protected]> | |
4 | * | |
5 | * Implements an efficient asynchronous io interface. | |
6 | * | |
7 | * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. | |
bfe4037e | 8 | * Copyright 2018 Christoph Hellwig. |
1da177e4 LT |
9 | * |
10 | * See ../COPYING for licensing terms. | |
11 | */ | |
caf4167a KO |
12 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
13 | ||
1da177e4 LT |
14 | #include <linux/kernel.h> |
15 | #include <linux/init.h> | |
16 | #include <linux/errno.h> | |
17 | #include <linux/time.h> | |
18 | #include <linux/aio_abi.h> | |
630d9c47 | 19 | #include <linux/export.h> |
1da177e4 | 20 | #include <linux/syscalls.h> |
b9d128f1 | 21 | #include <linux/backing-dev.h> |
9018ccc4 | 22 | #include <linux/refcount.h> |
027445c3 | 23 | #include <linux/uio.h> |
1da177e4 | 24 | |
174cd4b1 | 25 | #include <linux/sched/signal.h> |
1da177e4 LT |
26 | #include <linux/fs.h> |
27 | #include <linux/file.h> | |
28 | #include <linux/mm.h> | |
29 | #include <linux/mman.h> | |
3d2d827f | 30 | #include <linux/mmu_context.h> |
e1bdd5f2 | 31 | #include <linux/percpu.h> |
1da177e4 LT |
32 | #include <linux/slab.h> |
33 | #include <linux/timer.h> | |
34 | #include <linux/aio.h> | |
35 | #include <linux/highmem.h> | |
36 | #include <linux/workqueue.h> | |
37 | #include <linux/security.h> | |
9c3060be | 38 | #include <linux/eventfd.h> |
cfb1e33e | 39 | #include <linux/blkdev.h> |
9d85cba7 | 40 | #include <linux/compat.h> |
36bc08cc GZ |
41 | #include <linux/migrate.h> |
42 | #include <linux/ramfs.h> | |
723be6e3 | 43 | #include <linux/percpu-refcount.h> |
71ad7490 | 44 | #include <linux/mount.h> |
1da177e4 LT |
45 | |
46 | #include <asm/kmap_types.h> | |
7c0f6ba6 | 47 | #include <linux/uaccess.h> |
a538e3ff | 48 | #include <linux/nospec.h> |
1da177e4 | 49 | |
68d70d03 AV |
50 | #include "internal.h" |
51 | ||
f3a2752a CH |
52 | #define KIOCB_KEY 0 |
53 | ||
4e179bca KO |
54 | #define AIO_RING_MAGIC 0xa10a10a1 |
55 | #define AIO_RING_COMPAT_FEATURES 1 | |
56 | #define AIO_RING_INCOMPAT_FEATURES 0 | |
57 | struct aio_ring { | |
58 | unsigned id; /* kernel internal index number */ | |
59 | unsigned nr; /* number of io_events */ | |
fa8a53c3 BL |
60 | unsigned head; /* Written to by userland or under ring_lock |
61 | * mutex by aio_read_events_ring(). */ | |
4e179bca KO |
62 | unsigned tail; |
63 | ||
64 | unsigned magic; | |
65 | unsigned compat_features; | |
66 | unsigned incompat_features; | |
67 | unsigned header_length; /* size of aio_ring */ | |
68 | ||
69 | ||
70 | struct io_event io_events[0]; | |
71 | }; /* 128 bytes + ring size */ | |
72 | ||
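This header is mapped into the submitting process, and the head/tail fields above are deliberately writable from user space so completions can be reaped without a syscall. A minimal, hypothetical user-space sketch of that fast path (it assumes the layout above is redeclared verbatim on the user side, and falls back to io_getevents() whenever incompat_features is non-zero):

```c
#include <linux/aio_abi.h>              /* aio_context_t, struct io_event */

struct user_aio_ring {                  /* must mirror struct aio_ring above */
	unsigned id, nr, head, tail;
	unsigned magic, compat_features, incompat_features, header_length;
	struct io_event io_events[];
};

/* Returns 1 and fills *out if an event was reaped, 0 if empty, -1 to fall back. */
static int reap_one_event(aio_context_t ctx_id, struct io_event *out)
{
	struct user_aio_ring *ring = (struct user_aio_ring *)ctx_id;
	unsigned head = ring->head, tail = ring->tail;

	if (ring->magic != 0xa10a10a1 || ring->incompat_features != 0)
		return -1;                      /* unknown layout: use io_getevents() */
	if (head == tail)
		return 0;                       /* ring is empty */

	__atomic_thread_fence(__ATOMIC_ACQUIRE);   /* pairs with the kernel's smp_wmb() */
	*out = ring->io_events[head % ring->nr];
	__atomic_thread_fence(__ATOMIC_RELEASE);
	ring->head = (head + 1) % ring->nr;        /* hand the slot back to the kernel */
	return 1;
}
```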
a79d40e9 JA |
73 | /* |
74 | * Plugging is meant to work with larger batches of IOs. If we don't | |
75 | * have more than the below, then don't bother setting up a plug. | |
76 | */ | |
77 | #define AIO_PLUG_THRESHOLD 2 | |
78 | ||
4e179bca | 79 | #define AIO_RING_PAGES 8 |
4e179bca | 80 | |
db446a08 | 81 | struct kioctx_table { |
d0264c01 TH |
82 | struct rcu_head rcu; |
83 | unsigned nr; | |
84 | struct kioctx __rcu *table[]; | |
db446a08 BL |
85 | }; |
86 | ||
e1bdd5f2 KO |
87 | struct kioctx_cpu { |
88 | unsigned reqs_available; | |
89 | }; | |
90 | ||
dc48e56d JA |
91 | struct ctx_rq_wait { |
92 | struct completion comp; | |
93 | atomic_t count; | |
94 | }; | |
95 | ||
4e179bca | 96 | struct kioctx { |
723be6e3 | 97 | struct percpu_ref users; |
36f55889 | 98 | atomic_t dead; |
4e179bca | 99 | |
e34ecee2 KO |
100 | struct percpu_ref reqs; |
101 | ||
4e179bca | 102 | unsigned long user_id; |
4e179bca | 103 | |
e1bdd5f2 KO |
104 | struct __percpu kioctx_cpu *cpu; |
105 | ||
106 | /* | |
107 | * For percpu reqs_available, number of slots we move to/from global | |
108 | * counter at a time: | |
109 | */ | |
110 | unsigned req_batch; | |
3e845ce0 KO |
111 | /* |
112 | * This is what userspace passed to io_setup(), it's not used for | |
113 | * anything but counting against the global max_reqs quota. | |
114 | * | |
58c85dc2 | 115 | * The real limit is nr_events - 1, which will be larger (see |
3e845ce0 KO |
116 | * aio_setup_ring()) |
117 | */ | |
4e179bca KO |
118 | unsigned max_reqs; |
119 | ||
58c85dc2 KO |
120 | /* Size of ringbuffer, in units of struct io_event */ |
121 | unsigned nr_events; | |
4e179bca | 122 | |
58c85dc2 KO |
123 | unsigned long mmap_base; |
124 | unsigned long mmap_size; | |
125 | ||
126 | struct page **ring_pages; | |
127 | long nr_pages; | |
128 | ||
f729863a | 129 | struct rcu_work free_rwork; /* see free_ioctx() */ |
4e23bcae | 130 | |
e02ba72a AP |
131 | /* |
132 | * signals when all in-flight requests are done | |
133 | */ | |
dc48e56d | 134 | struct ctx_rq_wait *rq_wait; |
e02ba72a | 135 | |
4e23bcae | 136 | struct { |
34e83fc6 KO |
137 | /* |
138 | * This counts the number of available slots in the ringbuffer, | |
139 | * so we avoid overflowing it: it's decremented (if positive) | |
140 | * when allocating a kiocb and incremented when the resulting | |
141 | * io_event is pulled off the ringbuffer. | |
e1bdd5f2 KO |
142 | * |
143 | * We batch accesses to it with a percpu version. | |
34e83fc6 KO |
144 | */ |
145 | atomic_t reqs_available; | |
4e23bcae KO |
146 | } ____cacheline_aligned_in_smp; |
147 | ||
148 | struct { | |
149 | spinlock_t ctx_lock; | |
150 | struct list_head active_reqs; /* used for cancellation */ | |
151 | } ____cacheline_aligned_in_smp; | |
152 | ||
58c85dc2 KO |
153 | struct { |
154 | struct mutex ring_lock; | |
4e23bcae KO |
155 | wait_queue_head_t wait; |
156 | } ____cacheline_aligned_in_smp; | |
58c85dc2 KO |
157 | |
158 | struct { | |
159 | unsigned tail; | |
d856f32a | 160 | unsigned completed_events; |
58c85dc2 | 161 | spinlock_t completion_lock; |
4e23bcae | 162 | } ____cacheline_aligned_in_smp; |
58c85dc2 KO |
163 | |
164 | struct page *internal_pages[AIO_RING_PAGES]; | |
36bc08cc | 165 | struct file *aio_ring_file; |
db446a08 BL |
166 | |
167 | unsigned id; | |
4e179bca KO |
168 | }; |
169 | ||
a3c0d439 CH |
170 | struct fsync_iocb { |
171 | struct work_struct work; | |
172 | struct file *file; | |
173 | bool datasync; | |
174 | }; | |
175 | ||
bfe4037e CH |
176 | struct poll_iocb { |
177 | struct file *file; | |
178 | struct wait_queue_head *head; | |
179 | __poll_t events; | |
180 | bool woken; | |
181 | bool cancelled; | |
182 | struct wait_queue_entry wait; | |
183 | struct work_struct work; | |
184 | }; | |
185 | ||
04b2fa9f | 186 | struct aio_kiocb { |
54843f87 CH |
187 | union { |
188 | struct kiocb rw; | |
a3c0d439 | 189 | struct fsync_iocb fsync; |
bfe4037e | 190 | struct poll_iocb poll; |
54843f87 | 191 | }; |
04b2fa9f CH |
192 | |
193 | struct kioctx *ki_ctx; | |
194 | kiocb_cancel_fn *ki_cancel; | |
195 | ||
196 | struct iocb __user *ki_user_iocb; /* user's aiocb */ | |
197 | __u64 ki_user_data; /* user's data for completion */ | |
198 | ||
199 | struct list_head ki_list; /* the aio core uses this | |
200 | * for cancellation */ | |
9018ccc4 | 201 | refcount_t ki_refcnt; |
04b2fa9f CH |
202 | |
203 | /* | |
204 | * If the aio_resfd field of the userspace iocb is not zero, | |
205 | * this is the underlying eventfd context to deliver events to. | |
206 | */ | |
207 | struct eventfd_ctx *ki_eventfd; | |
208 | }; | |
209 | ||
1da177e4 | 210 | /*------ sysctl variables----*/ |
d55b5fda ZB |
211 | static DEFINE_SPINLOCK(aio_nr_lock); |
212 | unsigned long aio_nr; /* current system wide number of aio requests */ | |
213 | unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ | |
1da177e4 LT |
214 | /*----end sysctl variables---*/ |
215 | ||
e18b890b CL |
216 | static struct kmem_cache *kiocb_cachep; |
217 | static struct kmem_cache *kioctx_cachep; | |
1da177e4 | 218 | |
71ad7490 BL |
219 | static struct vfsmount *aio_mnt; |
220 | ||
221 | static const struct file_operations aio_ring_fops; | |
222 | static const struct address_space_operations aio_ctx_aops; | |
223 | ||
224 | static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) | |
225 | { | |
71ad7490 | 226 | struct file *file; |
71ad7490 | 227 | struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb); |
7f62656b DC |
228 | if (IS_ERR(inode)) |
229 | return ERR_CAST(inode); | |
71ad7490 BL |
230 | |
231 | inode->i_mapping->a_ops = &aio_ctx_aops; | |
232 | inode->i_mapping->private_data = ctx; | |
233 | inode->i_size = PAGE_SIZE * nr_pages; | |
234 | ||
d93aa9d8 AV |
235 | file = alloc_file_pseudo(inode, aio_mnt, "[aio]", |
236 | O_RDWR, &aio_ring_fops); | |
c9c554f2 | 237 | if (IS_ERR(file)) |
71ad7490 | 238 | iput(inode); |
71ad7490 BL |
239 | return file; |
240 | } | |
241 | ||
242 | static struct dentry *aio_mount(struct file_system_type *fs_type, | |
243 | int flags, const char *dev_name, void *data) | |
244 | { | |
d93aa9d8 | 245 | struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL, |
22f6b4d3 JH |
246 | AIO_RING_MAGIC); |
247 | ||
248 | if (!IS_ERR(root)) | |
249 | root->d_sb->s_iflags |= SB_I_NOEXEC; | |
250 | return root; | |
71ad7490 BL |
251 | } |
252 | ||
1da177e4 LT |
253 | /* aio_setup |
254 | * Creates the slab caches used by the aio routines, panic on | |
255 | * failure as this is done early during the boot sequence. | |
256 | */ | |
257 | static int __init aio_setup(void) | |
258 | { | |
71ad7490 BL |
259 | static struct file_system_type aio_fs = { |
260 | .name = "aio", | |
261 | .mount = aio_mount, | |
262 | .kill_sb = kill_anon_super, | |
263 | }; | |
264 | aio_mnt = kern_mount(&aio_fs); | |
265 | if (IS_ERR(aio_mnt)) | |
266 | panic("Failed to create aio fs mount."); | |
267 | ||
04b2fa9f | 268 | kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
0a31bd5f | 269 | kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
1da177e4 LT |
270 | return 0; |
271 | } | |
385773e0 | 272 | __initcall(aio_setup); |
1da177e4 | 273 | |
5e9ae2e5 BL |
274 | static void put_aio_ring_file(struct kioctx *ctx) |
275 | { | |
276 | struct file *aio_ring_file = ctx->aio_ring_file; | |
de04e769 RV |
277 | struct address_space *i_mapping; |
278 | ||
5e9ae2e5 | 279 | if (aio_ring_file) { |
45063097 | 280 | truncate_setsize(file_inode(aio_ring_file), 0); |
5e9ae2e5 BL |
281 | |
282 | /* Prevent further access to the kioctx from migratepages */ | |
45063097 | 283 | i_mapping = aio_ring_file->f_mapping; |
de04e769 RV |
284 | spin_lock(&i_mapping->private_lock); |
285 | i_mapping->private_data = NULL; | |
5e9ae2e5 | 286 | ctx->aio_ring_file = NULL; |
de04e769 | 287 | spin_unlock(&i_mapping->private_lock); |
5e9ae2e5 BL |
288 | |
289 | fput(aio_ring_file); | |
290 | } | |
291 | } | |
292 | ||
1da177e4 LT |
293 | static void aio_free_ring(struct kioctx *ctx) |
294 | { | |
36bc08cc | 295 | int i; |
1da177e4 | 296 | |
fa8a53c3 BL |
297 | /* Disconnect the kioctx from the ring file. This prevents future
298 | * accesses to the kioctx from page migration. | |
299 | */ | |
300 | put_aio_ring_file(ctx); | |
301 | ||
36bc08cc | 302 | for (i = 0; i < ctx->nr_pages; i++) { |
8e321fef | 303 | struct page *page; |
36bc08cc GZ |
304 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, |
305 | page_count(ctx->ring_pages[i])); | |
8e321fef BL |
306 | page = ctx->ring_pages[i]; |
307 | if (!page) | |
308 | continue; | |
309 | ctx->ring_pages[i] = NULL; | |
310 | put_page(page); | |
36bc08cc | 311 | } |
1da177e4 | 312 | |
ddb8c45b | 313 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { |
58c85dc2 | 314 | kfree(ctx->ring_pages); |
ddb8c45b SL |
315 | ctx->ring_pages = NULL; |
316 | } | |
36bc08cc GZ |
317 | } |
318 | ||
5477e70a | 319 | static int aio_ring_mremap(struct vm_area_struct *vma) |
e4a0d3e7 | 320 | { |
5477e70a | 321 | struct file *file = vma->vm_file; |
e4a0d3e7 PE |
322 | struct mm_struct *mm = vma->vm_mm; |
323 | struct kioctx_table *table; | |
b2edffdd | 324 | int i, res = -EINVAL; |
e4a0d3e7 PE |
325 | |
326 | spin_lock(&mm->ioctx_lock); | |
327 | rcu_read_lock(); | |
328 | table = rcu_dereference(mm->ioctx_table); | |
329 | for (i = 0; i < table->nr; i++) { | |
330 | struct kioctx *ctx; | |
331 | ||
d0264c01 | 332 | ctx = rcu_dereference(table->table[i]); |
e4a0d3e7 | 333 | if (ctx && ctx->aio_ring_file == file) { |
b2edffdd AV |
334 | if (!atomic_read(&ctx->dead)) { |
335 | ctx->user_id = ctx->mmap_base = vma->vm_start; | |
336 | res = 0; | |
337 | } | |
e4a0d3e7 PE |
338 | break; |
339 | } | |
340 | } | |
341 | ||
342 | rcu_read_unlock(); | |
343 | spin_unlock(&mm->ioctx_lock); | |
b2edffdd | 344 | return res; |
e4a0d3e7 PE |
345 | } |
346 | ||
5477e70a ON |
347 | static const struct vm_operations_struct aio_ring_vm_ops = { |
348 | .mremap = aio_ring_mremap, | |
349 | #if IS_ENABLED(CONFIG_MMU) | |
350 | .fault = filemap_fault, | |
351 | .map_pages = filemap_map_pages, | |
352 | .page_mkwrite = filemap_page_mkwrite, | |
353 | #endif | |
354 | }; | |
355 | ||
356 | static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) | |
357 | { | |
358 | vma->vm_flags |= VM_DONTEXPAND; | |
359 | vma->vm_ops = &aio_ring_vm_ops; | |
360 | return 0; | |
361 | } | |
362 | ||
36bc08cc GZ |
363 | static const struct file_operations aio_ring_fops = { |
364 | .mmap = aio_ring_mmap, | |
365 | }; | |
366 | ||
0c45355f | 367 | #if IS_ENABLED(CONFIG_MIGRATION) |
36bc08cc GZ |
368 | static int aio_migratepage(struct address_space *mapping, struct page *new, |
369 | struct page *old, enum migrate_mode mode) | |
370 | { | |
5e9ae2e5 | 371 | struct kioctx *ctx; |
36bc08cc | 372 | unsigned long flags; |
fa8a53c3 | 373 | pgoff_t idx; |
36bc08cc GZ |
374 | int rc; |
375 | ||
2916ecc0 JG |
376 | /* |
377 | * We cannot support the _NO_COPY case here, because copy needs to | |
378 | * happen under the ctx->completion_lock. That does not work with the | |
379 | * migration workflow of MIGRATE_SYNC_NO_COPY. | |
380 | */ | |
381 | if (mode == MIGRATE_SYNC_NO_COPY) | |
382 | return -EINVAL; | |
383 | ||
8e321fef BL |
384 | rc = 0; |
385 | ||
fa8a53c3 | 386 | /* mapping->private_lock here protects against the kioctx teardown. */ |
8e321fef BL |
387 | spin_lock(&mapping->private_lock); |
388 | ctx = mapping->private_data; | |
fa8a53c3 BL |
389 | if (!ctx) { |
390 | rc = -EINVAL; | |
391 | goto out; | |
392 | } | |
393 | ||
394 | /* The ring_lock mutex prevents aio_read_events() from writing
395 | * to the ring's head, and prevents page migration from mucking in
396 | * a partially initialized kioctx.
397 | */ | |
398 | if (!mutex_trylock(&ctx->ring_lock)) { | |
399 | rc = -EAGAIN; | |
400 | goto out; | |
401 | } | |
402 | ||
403 | idx = old->index; | |
404 | if (idx < (pgoff_t)ctx->nr_pages) { | |
405 | /* Make sure the old page hasn't already been changed */ | |
406 | if (ctx->ring_pages[idx] != old) | |
407 | rc = -EAGAIN; | |
8e321fef BL |
408 | } else |
409 | rc = -EINVAL; | |
8e321fef BL |
410 | |
411 | if (rc != 0) | |
fa8a53c3 | 412 | goto out_unlock; |
8e321fef | 413 | |
36bc08cc GZ |
414 | /* Writeback must be complete */ |
415 | BUG_ON(PageWriteback(old)); | |
8e321fef | 416 | get_page(new); |
36bc08cc | 417 | |
ab41ee68 | 418 | rc = migrate_page_move_mapping(mapping, new, old, mode, 1); |
36bc08cc | 419 | if (rc != MIGRATEPAGE_SUCCESS) { |
8e321fef | 420 | put_page(new); |
fa8a53c3 | 421 | goto out_unlock; |
36bc08cc GZ |
422 | } |
423 | ||
fa8a53c3 BL |
424 | /* Take completion_lock to prevent other writes to the ring buffer |
425 | * while the old page is copied to the new. This prevents new | |
426 | * events from being lost. | |
5e9ae2e5 | 427 | */ |
fa8a53c3 BL |
428 | spin_lock_irqsave(&ctx->completion_lock, flags); |
429 | migrate_page_copy(new, old); | |
430 | BUG_ON(ctx->ring_pages[idx] != old); | |
431 | ctx->ring_pages[idx] = new; | |
432 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | |
36bc08cc | 433 | |
fa8a53c3 BL |
434 | /* The old page is no longer accessible. */ |
435 | put_page(old); | |
8e321fef | 436 | |
fa8a53c3 BL |
437 | out_unlock: |
438 | mutex_unlock(&ctx->ring_lock); | |
439 | out: | |
440 | spin_unlock(&mapping->private_lock); | |
36bc08cc | 441 | return rc; |
1da177e4 | 442 | } |
0c45355f | 443 | #endif |
1da177e4 | 444 | |
36bc08cc | 445 | static const struct address_space_operations aio_ctx_aops = { |
835f252c | 446 | .set_page_dirty = __set_page_dirty_no_writeback, |
0c45355f | 447 | #if IS_ENABLED(CONFIG_MIGRATION) |
36bc08cc | 448 | .migratepage = aio_migratepage, |
0c45355f | 449 | #endif |
36bc08cc GZ |
450 | }; |
451 | ||
2a8a9867 | 452 | static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events) |
1da177e4 LT |
453 | { |
454 | struct aio_ring *ring; | |
41003a7b | 455 | struct mm_struct *mm = current->mm; |
3dc9acb6 | 456 | unsigned long size, unused; |
1da177e4 | 457 | int nr_pages; |
36bc08cc GZ |
458 | int i; |
459 | struct file *file; | |
1da177e4 LT |
460 | |
461 | /* Compensate for the ring buffer's head/tail overlap entry */ | |
462 | nr_events += 2; /* 1 is required, 2 for good luck */ | |
463 | ||
464 | size = sizeof(struct aio_ring); | |
465 | size += sizeof(struct io_event) * nr_events; | |
1da177e4 | 466 | |
36bc08cc | 467 | nr_pages = PFN_UP(size); |
1da177e4 LT |
468 | if (nr_pages < 0) |
469 | return -EINVAL; | |
470 | ||
71ad7490 | 471 | file = aio_private_file(ctx, nr_pages); |
36bc08cc GZ |
472 | if (IS_ERR(file)) { |
473 | ctx->aio_ring_file = NULL; | |
fa8a53c3 | 474 | return -ENOMEM; |
36bc08cc GZ |
475 | } |
476 | ||
3dc9acb6 LT |
477 | ctx->aio_ring_file = file; |
478 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) | |
479 | / sizeof(struct io_event); | |
480 | ||
481 | ctx->ring_pages = ctx->internal_pages; | |
482 | if (nr_pages > AIO_RING_PAGES) { | |
483 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), | |
484 | GFP_KERNEL); | |
485 | if (!ctx->ring_pages) { | |
486 | put_aio_ring_file(ctx); | |
487 | return -ENOMEM; | |
488 | } | |
489 | } | |
490 | ||
36bc08cc GZ |
491 | for (i = 0; i < nr_pages; i++) { |
492 | struct page *page; | |
45063097 | 493 | page = find_or_create_page(file->f_mapping, |
36bc08cc GZ |
494 | i, GFP_HIGHUSER | __GFP_ZERO); |
495 | if (!page) | |
496 | break; | |
497 | pr_debug("pid(%d) page[%d]->count=%d\n", | |
498 | current->pid, i, page_count(page)); | |
499 | SetPageUptodate(page); | |
36bc08cc | 500 | unlock_page(page); |
3dc9acb6 LT |
501 | |
502 | ctx->ring_pages[i] = page; | |
36bc08cc | 503 | } |
3dc9acb6 | 504 | ctx->nr_pages = i; |
1da177e4 | 505 | |
3dc9acb6 LT |
506 | if (unlikely(i != nr_pages)) { |
507 | aio_free_ring(ctx); | |
fa8a53c3 | 508 | return -ENOMEM; |
1da177e4 LT |
509 | } |
510 | ||
58c85dc2 KO |
511 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
512 | pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); | |
36bc08cc | 513 | |
013373e8 MH |
514 | if (down_write_killable(&mm->mmap_sem)) { |
515 | ctx->mmap_size = 0; | |
516 | aio_free_ring(ctx); | |
517 | return -EINTR; | |
518 | } | |
519 | ||
36bc08cc GZ |
520 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, |
521 | PROT_READ | PROT_WRITE, | |
897ab3e0 | 522 | MAP_SHARED, 0, &unused, NULL); |
3dc9acb6 | 523 | up_write(&mm->mmap_sem); |
58c85dc2 | 524 | if (IS_ERR((void *)ctx->mmap_base)) { |
58c85dc2 | 525 | ctx->mmap_size = 0; |
1da177e4 | 526 | aio_free_ring(ctx); |
fa8a53c3 | 527 | return -ENOMEM; |
1da177e4 LT |
528 | } |
529 | ||
58c85dc2 | 530 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
d6c355c7 | 531 | |
58c85dc2 KO |
532 | ctx->user_id = ctx->mmap_base; |
533 | ctx->nr_events = nr_events; /* trusted copy */ | |
1da177e4 | 534 | |
58c85dc2 | 535 | ring = kmap_atomic(ctx->ring_pages[0]); |
1da177e4 | 536 | ring->nr = nr_events; /* user copy */ |
db446a08 | 537 | ring->id = ~0U; |
1da177e4 LT |
538 | ring->head = ring->tail = 0; |
539 | ring->magic = AIO_RING_MAGIC; | |
540 | ring->compat_features = AIO_RING_COMPAT_FEATURES; | |
541 | ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; | |
542 | ring->header_length = sizeof(struct aio_ring); | |
e8e3c3d6 | 543 | kunmap_atomic(ring); |
58c85dc2 | 544 | flush_dcache_page(ctx->ring_pages[0]); |
1da177e4 LT |
545 | |
546 | return 0; | |
547 | } | |
548 | ||
1da177e4 LT |
549 | #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) |
550 | #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) | |
551 | #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) | |
552 | ||
04b2fa9f | 553 | void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel) |
0460fef2 | 554 | { |
54843f87 | 555 | struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw); |
0460fef2 KO |
556 | struct kioctx *ctx = req->ki_ctx; |
557 | unsigned long flags; | |
558 | ||
75321b50 CH |
559 | if (WARN_ON_ONCE(!list_empty(&req->ki_list))) |
560 | return; | |
0460fef2 | 561 | |
75321b50 CH |
562 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
563 | list_add_tail(&req->ki_list, &ctx->active_reqs); | |
0460fef2 | 564 | req->ki_cancel = cancel; |
0460fef2 KO |
565 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); |
566 | } | |
567 | EXPORT_SYMBOL(kiocb_set_cancel_fn); | |
568 | ||
a6d7cff4 TH |
569 | /* |
570 | * free_ioctx() should be RCU delayed to synchronize against the RCU | |
571 | * protected lookup_ioctx() and also needs process context to call | |
f729863a | 572 | * aio_free_ring(). Use rcu_work. |
a6d7cff4 | 573 | */ |
e34ecee2 | 574 | static void free_ioctx(struct work_struct *work) |
36f55889 | 575 | { |
f729863a TH |
576 | struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx, |
577 | free_rwork); | |
e34ecee2 | 578 | pr_debug("freeing %p\n", ctx); |
e1bdd5f2 | 579 | |
e34ecee2 | 580 | aio_free_ring(ctx); |
e1bdd5f2 | 581 | free_percpu(ctx->cpu); |
9a1049da TH |
582 | percpu_ref_exit(&ctx->reqs); |
583 | percpu_ref_exit(&ctx->users); | |
36f55889 KO |
584 | kmem_cache_free(kioctx_cachep, ctx); |
585 | } | |
586 | ||
e34ecee2 KO |
587 | static void free_ioctx_reqs(struct percpu_ref *ref) |
588 | { | |
589 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); | |
590 | ||
e02ba72a | 591 | /* At this point we know that there are no in-flight requests */
dc48e56d JA |
592 | if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) |
593 | complete(&ctx->rq_wait->comp); | |
e02ba72a | 594 | |
a6d7cff4 | 595 | /* Synchronize against RCU protected table->table[] dereferences */ |
f729863a TH |
596 | INIT_RCU_WORK(&ctx->free_rwork, free_ioctx); |
597 | queue_rcu_work(system_wq, &ctx->free_rwork); | |
e34ecee2 KO |
598 | } |
599 | ||
36f55889 KO |
600 | /* |
601 | * When this function runs, the kioctx has been removed from the "hash table" | |
602 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - | |
603 | * now it's safe to cancel any that need to be. | |
604 | */ | |
e34ecee2 | 605 | static void free_ioctx_users(struct percpu_ref *ref) |
36f55889 | 606 | { |
e34ecee2 | 607 | struct kioctx *ctx = container_of(ref, struct kioctx, users); |
04b2fa9f | 608 | struct aio_kiocb *req; |
36f55889 KO |
609 | |
610 | spin_lock_irq(&ctx->ctx_lock); | |
611 | ||
612 | while (!list_empty(&ctx->active_reqs)) { | |
613 | req = list_first_entry(&ctx->active_reqs, | |
04b2fa9f | 614 | struct aio_kiocb, ki_list); |
888933f8 | 615 | req->ki_cancel(&req->rw); |
4faa9996 | 616 | list_del_init(&req->ki_list); |
36f55889 KO |
617 | } |
618 | ||
619 | spin_unlock_irq(&ctx->ctx_lock); | |
620 | ||
e34ecee2 KO |
621 | percpu_ref_kill(&ctx->reqs); |
622 | percpu_ref_put(&ctx->reqs); | |
36f55889 KO |
623 | } |
624 | ||
db446a08 BL |
625 | static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) |
626 | { | |
627 | unsigned i, new_nr; | |
628 | struct kioctx_table *table, *old; | |
629 | struct aio_ring *ring; | |
630 | ||
631 | spin_lock(&mm->ioctx_lock); | |
855ef0de | 632 | table = rcu_dereference_raw(mm->ioctx_table); |
db446a08 BL |
633 | |
634 | while (1) { | |
635 | if (table) | |
636 | for (i = 0; i < table->nr; i++) | |
d0264c01 | 637 | if (!rcu_access_pointer(table->table[i])) { |
db446a08 | 638 | ctx->id = i; |
d0264c01 | 639 | rcu_assign_pointer(table->table[i], ctx); |
db446a08 BL |
640 | spin_unlock(&mm->ioctx_lock); |
641 | ||
fa8a53c3 BL |
642 | /* While kioctx setup is in progress, |
643 | * we are protected from page migration | |
644 | * changing ring_pages by ->ring_lock.
645 | */ | |
db446a08 BL |
646 | ring = kmap_atomic(ctx->ring_pages[0]); |
647 | ring->id = ctx->id; | |
648 | kunmap_atomic(ring); | |
649 | return 0; | |
650 | } | |
651 | ||
652 | new_nr = (table ? table->nr : 1) * 4; | |
db446a08 BL |
653 | spin_unlock(&mm->ioctx_lock); |
654 | ||
655 | table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * | |
656 | new_nr, GFP_KERNEL); | |
657 | if (!table) | |
658 | return -ENOMEM; | |
659 | ||
660 | table->nr = new_nr; | |
661 | ||
662 | spin_lock(&mm->ioctx_lock); | |
855ef0de | 663 | old = rcu_dereference_raw(mm->ioctx_table); |
db446a08 BL |
664 | |
665 | if (!old) { | |
666 | rcu_assign_pointer(mm->ioctx_table, table); | |
667 | } else if (table->nr > old->nr) { | |
668 | memcpy(table->table, old->table, | |
669 | old->nr * sizeof(struct kioctx *)); | |
670 | ||
671 | rcu_assign_pointer(mm->ioctx_table, table); | |
672 | kfree_rcu(old, rcu); | |
673 | } else { | |
674 | kfree(table); | |
675 | table = old; | |
676 | } | |
677 | } | |
678 | } | |
679 | ||
e34ecee2 KO |
680 | static void aio_nr_sub(unsigned nr) |
681 | { | |
682 | spin_lock(&aio_nr_lock); | |
683 | if (WARN_ON(aio_nr - nr > aio_nr)) | |
684 | aio_nr = 0; | |
685 | else | |
686 | aio_nr -= nr; | |
687 | spin_unlock(&aio_nr_lock); | |
688 | } | |
689 | ||
1da177e4 LT |
690 | /* ioctx_alloc |
691 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. | |
692 | */ | |
693 | static struct kioctx *ioctx_alloc(unsigned nr_events) | |
694 | { | |
41003a7b | 695 | struct mm_struct *mm = current->mm; |
1da177e4 | 696 | struct kioctx *ctx; |
e23754f8 | 697 | int err = -ENOMEM; |
1da177e4 | 698 | |
2a8a9867 MFO |
699 | /* |
700 | * Store the original nr_events -- what userspace passed to io_setup(), | |
701 | * for counting against the global limit -- before it changes. | |
702 | */ | |
703 | unsigned int max_reqs = nr_events; | |
704 | ||
e1bdd5f2 KO |
705 | /* |
706 | * We keep track of the number of available ringbuffer slots, to prevent | |
707 | * overflow (reqs_available), and we also use percpu counters for this. | |
708 | * | |
709 | * So since up to half the slots might be on other cpu's percpu counters | |
710 | * and unavailable, double nr_events so userspace sees what they | |
711 | * expected: additionally, we move req_batch slots to/from percpu | |
712 | * counters at a time, so make sure that isn't 0: | |
713 | */ | |
714 | nr_events = max(nr_events, num_possible_cpus() * 4); | |
715 | nr_events *= 2; | |
716 | ||
1da177e4 | 717 | /* Prevent overflows */ |
08397acd | 718 | if (nr_events > (0x10000000U / sizeof(struct io_event))) { |
1da177e4 LT |
719 | pr_debug("ENOMEM: nr_events too high\n"); |
720 | return ERR_PTR(-EINVAL); | |
721 | } | |
722 | ||
2a8a9867 | 723 | if (!nr_events || (unsigned long)max_reqs > aio_max_nr) |
1da177e4 LT |
724 | return ERR_PTR(-EAGAIN); |
725 | ||
c3762229 | 726 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); |
1da177e4 LT |
727 | if (!ctx) |
728 | return ERR_PTR(-ENOMEM); | |
729 | ||
2a8a9867 | 730 | ctx->max_reqs = max_reqs; |
1da177e4 | 731 | |
1da177e4 | 732 | spin_lock_init(&ctx->ctx_lock); |
0460fef2 | 733 | spin_lock_init(&ctx->completion_lock); |
58c85dc2 | 734 | mutex_init(&ctx->ring_lock); |
fa8a53c3 BL |
735 | /* Protect against page migration throughout kioctx setup by keeping
736 | * the ring_lock mutex held until setup is complete. */ | |
737 | mutex_lock(&ctx->ring_lock); | |
1da177e4 LT |
738 | init_waitqueue_head(&ctx->wait); |
739 | ||
740 | INIT_LIST_HEAD(&ctx->active_reqs); | |
1da177e4 | 741 | |
2aad2a86 | 742 | if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) |
fa8a53c3 BL |
743 | goto err; |
744 | ||
2aad2a86 | 745 | if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) |
fa8a53c3 BL |
746 | goto err; |
747 | ||
e1bdd5f2 KO |
748 | ctx->cpu = alloc_percpu(struct kioctx_cpu); |
749 | if (!ctx->cpu) | |
e34ecee2 | 750 | goto err; |
1da177e4 | 751 | |
2a8a9867 | 752 | err = aio_setup_ring(ctx, nr_events); |
fa8a53c3 | 753 | if (err < 0) |
e34ecee2 | 754 | goto err; |
e1bdd5f2 | 755 | |
34e83fc6 | 756 | atomic_set(&ctx->reqs_available, ctx->nr_events - 1); |
e1bdd5f2 | 757 | ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); |
6878ea72 BL |
758 | if (ctx->req_batch < 1) |
759 | ctx->req_batch = 1; | |
34e83fc6 | 760 | |
1da177e4 | 761 | /* limit the number of system wide aios */ |
9fa1cb39 | 762 | spin_lock(&aio_nr_lock); |
2a8a9867 MFO |
763 | if (aio_nr + ctx->max_reqs > aio_max_nr || |
764 | aio_nr + ctx->max_reqs < aio_nr) { | |
9fa1cb39 | 765 | spin_unlock(&aio_nr_lock); |
e34ecee2 | 766 | err = -EAGAIN; |
d1b94327 | 767 | goto err_ctx; |
2dd542b7 AV |
768 | } |
769 | aio_nr += ctx->max_reqs; | |
9fa1cb39 | 770 | spin_unlock(&aio_nr_lock); |
1da177e4 | 771 | |
1881686f BL |
772 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ |
773 | percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ | |
723be6e3 | 774 | |
da90382c BL |
775 | err = ioctx_add_table(ctx, mm); |
776 | if (err) | |
e34ecee2 | 777 | goto err_cleanup; |
da90382c | 778 | |
fa8a53c3 BL |
779 | /* Release the ring_lock mutex now that all setup is complete. */ |
780 | mutex_unlock(&ctx->ring_lock); | |
781 | ||
caf4167a | 782 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", |
58c85dc2 | 783 | ctx, ctx->user_id, mm, ctx->nr_events); |
1da177e4 LT |
784 | return ctx; |
785 | ||
e34ecee2 KO |
786 | err_cleanup: |
787 | aio_nr_sub(ctx->max_reqs); | |
d1b94327 | 788 | err_ctx: |
deeb8525 AV |
789 | atomic_set(&ctx->dead, 1); |
790 | if (ctx->mmap_size) | |
791 | vm_munmap(ctx->mmap_base, ctx->mmap_size); | |
d1b94327 | 792 | aio_free_ring(ctx); |
e34ecee2 | 793 | err: |
fa8a53c3 | 794 | mutex_unlock(&ctx->ring_lock); |
e1bdd5f2 | 795 | free_percpu(ctx->cpu); |
9a1049da TH |
796 | percpu_ref_exit(&ctx->reqs); |
797 | percpu_ref_exit(&ctx->users); | |
1da177e4 | 798 | kmem_cache_free(kioctx_cachep, ctx); |
caf4167a | 799 | pr_debug("error allocating ioctx %d\n", err); |
e23754f8 | 800 | return ERR_PTR(err); |
1da177e4 LT |
801 | } |
802 | ||
36f55889 KO |
803 | /* kill_ioctx |
804 | * Cancels all outstanding aio requests on an aio context. Used | |
805 | * when the processes owning a context have all exited to encourage | |
806 | * the rapid destruction of the kioctx. | |
807 | */ | |
fb2d4483 | 808 | static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, |
dc48e56d | 809 | struct ctx_rq_wait *wait) |
36f55889 | 810 | { |
fa88b6f8 | 811 | struct kioctx_table *table; |
db446a08 | 812 | |
b2edffdd AV |
813 | spin_lock(&mm->ioctx_lock); |
814 | if (atomic_xchg(&ctx->dead, 1)) { | |
815 | spin_unlock(&mm->ioctx_lock); | |
fa88b6f8 | 816 | return -EINVAL; |
b2edffdd | 817 | } |
db446a08 | 818 | |
855ef0de | 819 | table = rcu_dereference_raw(mm->ioctx_table); |
d0264c01 TH |
820 | WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); |
821 | RCU_INIT_POINTER(table->table[ctx->id], NULL); | |
fa88b6f8 | 822 | spin_unlock(&mm->ioctx_lock); |
4fcc712f | 823 | |
a6d7cff4 | 824 | /* free_ioctx_reqs() will do the necessary RCU synchronization */ |
fa88b6f8 | 825 | wake_up_all(&ctx->wait); |
4fcc712f | 826 | |
fa88b6f8 BL |
827 | /* |
828 | * It'd be more correct to do this in free_ioctx(), after all | |
829 | * the outstanding kiocbs have finished - but by then io_destroy | |
830 | * has already returned, so io_setup() could potentially return | |
831 | * -EAGAIN with no ioctxs actually in use (as far as userspace | |
832 | * could tell). | |
833 | */ | |
834 | aio_nr_sub(ctx->max_reqs); | |
4fcc712f | 835 | |
fa88b6f8 BL |
836 | if (ctx->mmap_size) |
837 | vm_munmap(ctx->mmap_base, ctx->mmap_size); | |
fb2d4483 | 838 | |
dc48e56d | 839 | ctx->rq_wait = wait; |
fa88b6f8 BL |
840 | percpu_ref_kill(&ctx->users); |
841 | return 0; | |
1da177e4 LT |
842 | } |
843 | ||
36f55889 KO |
844 | /* |
845 | * exit_aio: called when the last user of mm goes away. At this point, there is | |
846 | * no way for any new requests to be submitted or any of the io_* syscalls to be
847 | * called on the context. | |
848 | * | |
849 | * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on | |
850 | * them. | |
1da177e4 | 851 | */ |
fc9b52cd | 852 | void exit_aio(struct mm_struct *mm) |
1da177e4 | 853 | { |
4b70ac5f | 854 | struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table); |
dc48e56d JA |
855 | struct ctx_rq_wait wait; |
856 | int i, skipped; | |
db446a08 | 857 | |
4b70ac5f ON |
858 | if (!table) |
859 | return; | |
db446a08 | 860 | |
dc48e56d JA |
861 | atomic_set(&wait.count, table->nr); |
862 | init_completion(&wait.comp); | |
863 | ||
864 | skipped = 0; | |
4b70ac5f | 865 | for (i = 0; i < table->nr; ++i) { |
d0264c01 TH |
866 | struct kioctx *ctx = |
867 | rcu_dereference_protected(table->table[i], true); | |
abf137dd | 868 | |
dc48e56d JA |
869 | if (!ctx) { |
870 | skipped++; | |
4b70ac5f | 871 | continue; |
dc48e56d JA |
872 | } |
873 | ||
936af157 | 874 | /* |
4b70ac5f ON |
875 | * We don't need to bother with munmap() here - exit_mmap(mm) |
876 | * is coming and it'll unmap everything. And we simply can't, | |
877 | * this is not necessarily our ->mm. | |
878 | * Since kill_ioctx() uses non-zero ->mmap_size as indicator | |
879 | * that it needs to unmap the area, just set it to 0. | |
936af157 | 880 | */ |
58c85dc2 | 881 | ctx->mmap_size = 0; |
dc48e56d JA |
882 | kill_ioctx(mm, ctx, &wait); |
883 | } | |
36f55889 | 884 | |
dc48e56d | 885 | if (!atomic_sub_and_test(skipped, &wait.count)) { |
6098b45b | 886 | /* Wait until all IO for the context are done. */ |
dc48e56d | 887 | wait_for_completion(&wait.comp); |
1da177e4 | 888 | } |
4b70ac5f ON |
889 | |
890 | RCU_INIT_POINTER(mm->ioctx_table, NULL); | |
891 | kfree(table); | |
1da177e4 LT |
892 | } |
893 | ||
e1bdd5f2 KO |
894 | static void put_reqs_available(struct kioctx *ctx, unsigned nr) |
895 | { | |
896 | struct kioctx_cpu *kcpu; | |
263782c1 | 897 | unsigned long flags; |
e1bdd5f2 | 898 | |
263782c1 | 899 | local_irq_save(flags); |
be6fb451 | 900 | kcpu = this_cpu_ptr(ctx->cpu); |
e1bdd5f2 | 901 | kcpu->reqs_available += nr; |
263782c1 | 902 | |
e1bdd5f2 KO |
903 | while (kcpu->reqs_available >= ctx->req_batch * 2) { |
904 | kcpu->reqs_available -= ctx->req_batch; | |
905 | atomic_add(ctx->req_batch, &ctx->reqs_available); | |
906 | } | |
907 | ||
263782c1 | 908 | local_irq_restore(flags); |
e1bdd5f2 KO |
909 | } |
910 | ||
432c7997 | 911 | static bool __get_reqs_available(struct kioctx *ctx) |
e1bdd5f2 KO |
912 | { |
913 | struct kioctx_cpu *kcpu; | |
914 | bool ret = false; | |
263782c1 | 915 | unsigned long flags; |
e1bdd5f2 | 916 | |
263782c1 | 917 | local_irq_save(flags); |
be6fb451 | 918 | kcpu = this_cpu_ptr(ctx->cpu); |
e1bdd5f2 KO |
919 | if (!kcpu->reqs_available) { |
920 | int old, avail = atomic_read(&ctx->reqs_available); | |
921 | ||
922 | do { | |
923 | if (avail < ctx->req_batch) | |
924 | goto out; | |
925 | ||
926 | old = avail; | |
927 | avail = atomic_cmpxchg(&ctx->reqs_available, | |
928 | avail, avail - ctx->req_batch); | |
929 | } while (avail != old); | |
930 | ||
931 | kcpu->reqs_available += ctx->req_batch; | |
932 | } | |
933 | ||
934 | ret = true; | |
935 | kcpu->reqs_available--; | |
936 | out: | |
263782c1 | 937 | local_irq_restore(flags); |
e1bdd5f2 KO |
938 | return ret; |
939 | } | |
940 | ||
d856f32a BL |
941 | /* refill_reqs_available |
942 | * Updates the reqs_available reference counts used for tracking the | |
943 | * number of free slots in the completion ring. This can be called | |
944 | * from aio_complete() (to optimistically update reqs_available) or | |
945 | * from aio_get_req() (the "we're out of events" case). It must be
946 | * called holding ctx->completion_lock. | |
947 | */ | |
948 | static void refill_reqs_available(struct kioctx *ctx, unsigned head, | |
949 | unsigned tail) | |
950 | { | |
951 | unsigned events_in_ring, completed; | |
952 | ||
953 | /* Clamp head since userland can write to it. */ | |
954 | head %= ctx->nr_events; | |
955 | if (head <= tail) | |
956 | events_in_ring = tail - head; | |
957 | else | |
958 | events_in_ring = ctx->nr_events - (head - tail); | |
959 | ||
960 | completed = ctx->completed_events; | |
961 | if (events_in_ring < completed) | |
962 | completed -= events_in_ring; | |
963 | else | |
964 | completed = 0; | |
965 | ||
966 | if (!completed) | |
967 | return; | |
968 | ||
969 | ctx->completed_events -= completed; | |
970 | put_reqs_available(ctx, completed); | |
971 | } | |
972 | ||
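As a hypothetical worked example of the arithmetic above: with nr_events = 128, head = 120 and tail = 8 the ring has wrapped, so events_in_ring = 128 - (120 - 8) = 16; if completed_events is 20, then 20 - 16 = 4 events have demonstrably been consumed by user space, so 4 slots are handed back via put_reqs_available() and completed_events drops to 16.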
973 | /* user_refill_reqs_available | |
974 | * Called to refill reqs_available when aio_get_req() encounters an | |
975 | * out of space in the completion ring. | |
976 | */ | |
977 | static void user_refill_reqs_available(struct kioctx *ctx) | |
978 | { | |
979 | spin_lock_irq(&ctx->completion_lock); | |
980 | if (ctx->completed_events) { | |
981 | struct aio_ring *ring; | |
982 | unsigned head; | |
983 | ||
984 | /* Access of ring->head may race with aio_read_events_ring() | |
985 | * here, but that's okay: we may read either the old or the
986 | * new version, and both are valid. The important
987 | * part is that head cannot pass tail since we prevent | |
988 | * aio_complete() from updating tail by holding | |
989 | * ctx->completion_lock. Even if head is invalid, the check | |
990 | * against ctx->completed_events below will make sure we do the | |
991 | * safe/right thing. | |
992 | */ | |
993 | ring = kmap_atomic(ctx->ring_pages[0]); | |
994 | head = ring->head; | |
995 | kunmap_atomic(ring); | |
996 | ||
997 | refill_reqs_available(ctx, head, ctx->tail); | |
998 | } | |
999 | ||
1000 | spin_unlock_irq(&ctx->completion_lock); | |
1001 | } | |
1002 | ||
432c7997 CH |
1003 | static bool get_reqs_available(struct kioctx *ctx) |
1004 | { | |
1005 | if (__get_reqs_available(ctx)) | |
1006 | return true; | |
1007 | user_refill_reqs_available(ctx); | |
1008 | return __get_reqs_available(ctx); | |
1009 | } | |
1010 | ||
1da177e4 | 1011 | /* aio_get_req |
57282d8f KO |
1012 | * Allocate a slot for an aio request. |
1013 | * Returns NULL if no requests are free. | |
1da177e4 | 1014 | */ |
04b2fa9f | 1015 | static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) |
1da177e4 | 1016 | { |
04b2fa9f | 1017 | struct aio_kiocb *req; |
a1c8eae7 | 1018 | |
2bc4ca9b | 1019 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL); |
1da177e4 | 1020 | if (unlikely(!req)) |
432c7997 | 1021 | return NULL; |
1da177e4 | 1022 | |
e34ecee2 | 1023 | percpu_ref_get(&ctx->reqs); |
2bc4ca9b | 1024 | req->ki_ctx = ctx; |
75321b50 | 1025 | INIT_LIST_HEAD(&req->ki_list); |
9018ccc4 | 1026 | refcount_set(&req->ki_refcnt, 0); |
2bc4ca9b | 1027 | req->ki_eventfd = NULL; |
080d676d | 1028 | return req; |
1da177e4 LT |
1029 | } |
1030 | ||
d5470b59 | 1031 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
1da177e4 | 1032 | { |
db446a08 | 1033 | struct aio_ring __user *ring = (void __user *)ctx_id; |
abf137dd | 1034 | struct mm_struct *mm = current->mm; |
65c24491 | 1035 | struct kioctx *ctx, *ret = NULL; |
db446a08 BL |
1036 | struct kioctx_table *table; |
1037 | unsigned id; | |
1038 | ||
1039 | if (get_user(id, &ring->id)) | |
1040 | return NULL; | |
1da177e4 | 1041 | |
abf137dd | 1042 | rcu_read_lock(); |
db446a08 | 1043 | table = rcu_dereference(mm->ioctx_table); |
abf137dd | 1044 | |
db446a08 BL |
1045 | if (!table || id >= table->nr) |
1046 | goto out; | |
1da177e4 | 1047 | |
a538e3ff | 1048 | id = array_index_nospec(id, table->nr); |
d0264c01 | 1049 | ctx = rcu_dereference(table->table[id]); |
f30d704f | 1050 | if (ctx && ctx->user_id == ctx_id) { |
baf10564 AV |
1051 | if (percpu_ref_tryget_live(&ctx->users)) |
1052 | ret = ctx; | |
db446a08 BL |
1053 | } |
1054 | out: | |
abf137dd | 1055 | rcu_read_unlock(); |
65c24491 | 1056 | return ret; |
1da177e4 LT |
1057 | } |
1058 | ||
9018ccc4 CH |
1059 | static inline void iocb_put(struct aio_kiocb *iocb) |
1060 | { | |
1061 | if (refcount_read(&iocb->ki_refcnt) == 0 || | |
1062 | refcount_dec_and_test(&iocb->ki_refcnt)) { | |
1063 | percpu_ref_put(&iocb->ki_ctx->reqs); | |
1064 | kmem_cache_free(kiocb_cachep, iocb); | |
1065 | } | |
1066 | } | |
1067 | ||
875736bb JA |
1068 | static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb, |
1069 | long res, long res2) | |
1070 | { | |
1071 | ev->obj = (u64)(unsigned long)iocb->ki_user_iocb; | |
1072 | ev->data = iocb->ki_user_data; | |
1073 | ev->res = res; | |
1074 | ev->res2 = res2; | |
1075 | } | |
1076 | ||
1da177e4 LT |
1077 | /* aio_complete |
1078 | * Called when the io request on the given iocb is complete. | |
1da177e4 | 1079 | */ |
54843f87 | 1080 | static void aio_complete(struct aio_kiocb *iocb, long res, long res2) |
1da177e4 LT |
1081 | { |
1082 | struct kioctx *ctx = iocb->ki_ctx; | |
1da177e4 | 1083 | struct aio_ring *ring; |
21b40200 | 1084 | struct io_event *ev_page, *event; |
d856f32a | 1085 | unsigned tail, pos, head; |
1da177e4 | 1086 | unsigned long flags; |
1da177e4 | 1087 | |
0460fef2 KO |
1088 | /* |
1089 | * Add a completion event to the ring buffer. Must be done holding | |
4b30f07e | 1090 | * ctx->completion_lock to prevent other code from messing with the tail |
0460fef2 KO |
1091 | * pointer since we might be called from irq context. |
1092 | */ | |
1093 | spin_lock_irqsave(&ctx->completion_lock, flags); | |
1094 | ||
58c85dc2 | 1095 | tail = ctx->tail; |
21b40200 KO |
1096 | pos = tail + AIO_EVENTS_OFFSET; |
1097 | ||
58c85dc2 | 1098 | if (++tail >= ctx->nr_events) |
4bf69b2a | 1099 | tail = 0; |
1da177e4 | 1100 | |
58c85dc2 | 1101 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
21b40200 KO |
1102 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; |
1103 | ||
875736bb | 1104 | aio_fill_event(event, iocb, res, res2); |
1da177e4 | 1105 | |
21b40200 | 1106 | kunmap_atomic(ev_page); |
58c85dc2 | 1107 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
21b40200 KO |
1108 | |
1109 | pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", | |
04b2fa9f | 1110 | ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, |
caf4167a | 1111 | res, res2); |
1da177e4 LT |
1112 | |
1113 | /* after flagging the request as done, we | |
1114 | * must never even look at it again | |
1115 | */ | |
1116 | smp_wmb(); /* make event visible before updating tail */ | |
1117 | ||
58c85dc2 | 1118 | ctx->tail = tail; |
1da177e4 | 1119 | |
58c85dc2 | 1120 | ring = kmap_atomic(ctx->ring_pages[0]); |
d856f32a | 1121 | head = ring->head; |
21b40200 | 1122 | ring->tail = tail; |
e8e3c3d6 | 1123 | kunmap_atomic(ring); |
58c85dc2 | 1124 | flush_dcache_page(ctx->ring_pages[0]); |
1da177e4 | 1125 | |
d856f32a BL |
1126 | ctx->completed_events++; |
1127 | if (ctx->completed_events > 1) | |
1128 | refill_reqs_available(ctx, head, tail); | |
0460fef2 KO |
1129 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
1130 | ||
21b40200 | 1131 | pr_debug("added to ring %p at [%u]\n", iocb, tail); |
8d1c98b0 DL |
1132 | |
1133 | /* | |
1134 | * Check if the user asked us to deliver the result through an | |
1135 | * eventfd. The eventfd_signal() function is safe to be called | |
1136 | * from IRQ context. | |
1137 | */ | |
54843f87 | 1138 | if (iocb->ki_eventfd) { |
8d1c98b0 | 1139 | eventfd_signal(iocb->ki_eventfd, 1); |
54843f87 CH |
1140 | eventfd_ctx_put(iocb->ki_eventfd); |
1141 | } | |
8d1c98b0 | 1142 | |
6cb2a210 QB |
1143 | /* |
1144 | * We have to order our ring_info tail store above and test | |
1145 | * of the wait list below outside the wait lock. This is | |
1146 | * like in wake_up_bit() where clearing a bit has to be | |
1147 | * ordered with the unlocked test. | |
1148 | */ | |
1149 | smp_mb(); | |
1150 | ||
1da177e4 LT |
1151 | if (waitqueue_active(&ctx->wait)) |
1152 | wake_up(&ctx->wait); | |
9018ccc4 | 1153 | iocb_put(iocb); |
1da177e4 LT |
1154 | } |
1155 | ||
2be4e7de | 1156 | /* aio_read_events_ring |
a31ad380 KO |
1157 | * Pull an event off of the ioctx's event ring. Returns the number of |
1158 | * events fetched | |
1da177e4 | 1159 | */ |
a31ad380 KO |
1160 | static long aio_read_events_ring(struct kioctx *ctx, |
1161 | struct io_event __user *event, long nr) | |
1da177e4 | 1162 | { |
1da177e4 | 1163 | struct aio_ring *ring; |
5ffac122 | 1164 | unsigned head, tail, pos; |
a31ad380 KO |
1165 | long ret = 0; |
1166 | int copy_ret; | |
1167 | ||
9c9ce763 DC |
1168 | /* |
1169 | * The mutex can block and wake us up and that will cause | |
1170 | * wait_event_interruptible_hrtimeout() to schedule without sleeping | |
1171 | * and repeat. This should be rare enough that it doesn't cause | |
1172 | * performance issues. See the comment in read_events() for more detail.
1173 | */ | |
1174 | sched_annotate_sleep(); | |
58c85dc2 | 1175 | mutex_lock(&ctx->ring_lock); |
1da177e4 | 1176 | |
fa8a53c3 | 1177 | /* Access to ->ring_pages here is protected by ctx->ring_lock. */ |
58c85dc2 | 1178 | ring = kmap_atomic(ctx->ring_pages[0]); |
a31ad380 | 1179 | head = ring->head; |
5ffac122 | 1180 | tail = ring->tail; |
a31ad380 KO |
1181 | kunmap_atomic(ring); |
1182 | ||
2ff396be JM |
1183 | /* |
1184 | * Ensure that once we've read the current tail pointer, that | |
1185 | * we also see the events that were stored up to the tail. | |
1186 | */ | |
1187 | smp_rmb(); | |
1188 | ||
5ffac122 | 1189 | pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); |
1da177e4 | 1190 | |
5ffac122 | 1191 | if (head == tail) |
1da177e4 LT |
1192 | goto out; |
1193 | ||
edfbbf38 BL |
1194 | head %= ctx->nr_events; |
1195 | tail %= ctx->nr_events; | |
1196 | ||
a31ad380 KO |
1197 | while (ret < nr) { |
1198 | long avail; | |
1199 | struct io_event *ev; | |
1200 | struct page *page; | |
1201 | ||
5ffac122 KO |
1202 | avail = (head <= tail ? tail : ctx->nr_events) - head; |
1203 | if (head == tail) | |
a31ad380 KO |
1204 | break; |
1205 | ||
a31ad380 | 1206 | pos = head + AIO_EVENTS_OFFSET; |
58c85dc2 | 1207 | page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; |
a31ad380 KO |
1208 | pos %= AIO_EVENTS_PER_PAGE; |
1209 | ||
d2988bd4 AV |
1210 | avail = min(avail, nr - ret); |
1211 | avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos); | |
1212 | ||
a31ad380 KO |
1213 | ev = kmap(page); |
1214 | copy_ret = copy_to_user(event + ret, ev + pos, | |
1215 | sizeof(*ev) * avail); | |
1216 | kunmap(page); | |
1217 | ||
1218 | if (unlikely(copy_ret)) { | |
1219 | ret = -EFAULT; | |
1220 | goto out; | |
1221 | } | |
1222 | ||
1223 | ret += avail; | |
1224 | head += avail; | |
58c85dc2 | 1225 | head %= ctx->nr_events; |
1da177e4 | 1226 | } |
1da177e4 | 1227 | |
58c85dc2 | 1228 | ring = kmap_atomic(ctx->ring_pages[0]); |
a31ad380 | 1229 | ring->head = head; |
91d80a84 | 1230 | kunmap_atomic(ring); |
58c85dc2 | 1231 | flush_dcache_page(ctx->ring_pages[0]); |
a31ad380 | 1232 | |
5ffac122 | 1233 | pr_debug("%li h%u t%u\n", ret, head, tail); |
a31ad380 | 1234 | out: |
58c85dc2 | 1235 | mutex_unlock(&ctx->ring_lock); |
a31ad380 | 1236 | |
1da177e4 LT |
1237 | return ret; |
1238 | } | |
1239 | ||
a31ad380 KO |
1240 | static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, |
1241 | struct io_event __user *event, long *i) | |
1da177e4 | 1242 | { |
a31ad380 | 1243 | long ret = aio_read_events_ring(ctx, event + *i, nr - *i); |
1da177e4 | 1244 | |
a31ad380 KO |
1245 | if (ret > 0) |
1246 | *i += ret; | |
1da177e4 | 1247 | |
a31ad380 KO |
1248 | if (unlikely(atomic_read(&ctx->dead))) |
1249 | ret = -EINVAL; | |
1da177e4 | 1250 | |
a31ad380 KO |
1251 | if (!*i) |
1252 | *i = ret; | |
1da177e4 | 1253 | |
a31ad380 | 1254 | return ret < 0 || *i >= min_nr; |
1da177e4 LT |
1255 | } |
1256 | ||
a31ad380 | 1257 | static long read_events(struct kioctx *ctx, long min_nr, long nr, |
1da177e4 | 1258 | struct io_event __user *event, |
fa2e62a5 | 1259 | ktime_t until) |
1da177e4 | 1260 | { |
a31ad380 | 1261 | long ret = 0; |
1da177e4 | 1262 | |
a31ad380 KO |
1263 | /* |
1264 | * Note that aio_read_events() is being called as the conditional - i.e. | |
1265 | * we're calling it after prepare_to_wait() has set task state to | |
1266 | * TASK_INTERRUPTIBLE. | |
1267 | * | |
1268 | * But aio_read_events() can block, and if it blocks it's going to flip | |
1269 | * the task state back to TASK_RUNNING. | |
1270 | * | |
1271 | * This should be ok, provided it doesn't flip the state back to | |
1272 | * TASK_RUNNING and return 0 too much - that causes us to spin. That | |
1273 | * will only happen if the mutex_lock() call blocks, and we then find | |
1274 | * the ringbuffer empty. So in practice we should be ok, but it's | |
1275 | * something to be aware of when touching this code. | |
1276 | */ | |
2456e855 | 1277 | if (until == 0) |
5f785de5 FZ |
1278 | aio_read_events(ctx, min_nr, nr, event, &ret); |
1279 | else | |
1280 | wait_event_interruptible_hrtimeout(ctx->wait, | |
1281 | aio_read_events(ctx, min_nr, nr, event, &ret), | |
1282 | until); | |
a31ad380 | 1283 | return ret; |
1da177e4 LT |
1284 | } |
1285 | ||
1da177e4 LT |
1286 | /* sys_io_setup: |
1287 | * Create an aio_context capable of receiving at least nr_events. | |
1288 | * ctxp must not point to an aio_context that already exists, and | |
1289 | * must be initialized to 0 prior to the call. On successful | |
1290 | * creation of the aio_context, *ctxp is filled in with the resulting | |
1291 | * handle. May fail with -EINVAL if *ctxp is not initialized, | |
1292 | * if the specified nr_events exceeds internal limits. May fail | |
1293 | * with -EAGAIN if the specified nr_events exceeds the user's limit | |
1294 | * of available events. May fail with -ENOMEM if insufficient kernel | |
1295 | * resources are available. May fail with -EFAULT if an invalid | |
1296 | * pointer is passed for ctxp. Will fail with -ENOSYS if not | |
1297 | * implemented. | |
1298 | */ | |
002c8976 | 1299 | SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) |
1da177e4 LT |
1300 | { |
1301 | struct kioctx *ioctx = NULL; | |
1302 | unsigned long ctx; | |
1303 | long ret; | |
1304 | ||
1305 | ret = get_user(ctx, ctxp); | |
1306 | if (unlikely(ret)) | |
1307 | goto out; | |
1308 | ||
1309 | ret = -EINVAL; | |
d55b5fda | 1310 | if (unlikely(ctx || nr_events == 0)) { |
acd88d4e | 1311 | pr_debug("EINVAL: ctx %lu nr_events %u\n", |
d55b5fda | 1312 | ctx, nr_events); |
1da177e4 LT |
1313 | goto out; |
1314 | } | |
1315 | ||
1316 | ioctx = ioctx_alloc(nr_events); | |
1317 | ret = PTR_ERR(ioctx); | |
1318 | if (!IS_ERR(ioctx)) { | |
1319 | ret = put_user(ioctx->user_id, ctxp); | |
a2e1859a | 1320 | if (ret) |
e02ba72a | 1321 | kill_ioctx(current->mm, ioctx, NULL); |
723be6e3 | 1322 | percpu_ref_put(&ioctx->users); |
1da177e4 LT |
1323 | } |
1324 | ||
1325 | out: | |
1326 | return ret; | |
1327 | } | |
1328 | ||
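The contract spelled out in the io_setup comment above (zeroed *ctxp, per-user and system-wide limits) is easiest to see from user space; a minimal, hypothetical sketch using the raw syscalls rather than libaio, with the event count chosen arbitrarily:

```c
#include <linux/aio_abi.h>      /* aio_context_t */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;  /* must be zero before io_setup(), else -EINVAL */

	/* Ask for room for at least 128 in-flight requests. */
	if (syscall(SYS_io_setup, 128, &ctx) < 0) {
		perror("io_setup");     /* EAGAIN if fs.aio-max-nr would be exceeded */
		return 1;
	}

	/* ... io_submit() / io_getevents() calls would go here ... */

	syscall(SYS_io_destroy, ctx);   /* cancels/waits for outstanding requests */
	return 0;
}
```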
c00d2c7e AV |
1329 | #ifdef CONFIG_COMPAT |
1330 | COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p) | |
1331 | { | |
1332 | struct kioctx *ioctx = NULL; | |
1333 | unsigned long ctx; | |
1334 | long ret; | |
1335 | ||
1336 | ret = get_user(ctx, ctx32p); | |
1337 | if (unlikely(ret)) | |
1338 | goto out; | |
1339 | ||
1340 | ret = -EINVAL; | |
1341 | if (unlikely(ctx || nr_events == 0)) { | |
1342 | pr_debug("EINVAL: ctx %lu nr_events %u\n", | |
1343 | ctx, nr_events); | |
1344 | goto out; | |
1345 | } | |
1346 | ||
1347 | ioctx = ioctx_alloc(nr_events); | |
1348 | ret = PTR_ERR(ioctx); | |
1349 | if (!IS_ERR(ioctx)) { | |
1350 | /* truncating is ok because it's a user address */ | |
1351 | ret = put_user((u32)ioctx->user_id, ctx32p); | |
1352 | if (ret) | |
1353 | kill_ioctx(current->mm, ioctx, NULL); | |
1354 | percpu_ref_put(&ioctx->users); | |
1355 | } | |
1356 | ||
1357 | out: | |
1358 | return ret; | |
1359 | } | |
1360 | #endif | |
1361 | ||
1da177e4 LT |
1362 | /* sys_io_destroy: |
1363 | * Destroy the aio_context specified. May cancel any outstanding | |
1364 | * AIOs and block on completion. Will fail with -ENOSYS if not | |
642b5123 | 1365 | * implemented. May fail with -EINVAL if the context pointed to |
1da177e4 LT |
1366 | * is invalid. |
1367 | */ | |
002c8976 | 1368 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) |
1da177e4 LT |
1369 | { |
1370 | struct kioctx *ioctx = lookup_ioctx(ctx); | |
1371 | if (likely(NULL != ioctx)) { | |
dc48e56d | 1372 | struct ctx_rq_wait wait; |
fb2d4483 | 1373 | int ret; |
e02ba72a | 1374 | |
dc48e56d JA |
1375 | init_completion(&wait.comp); |
1376 | atomic_set(&wait.count, 1); | |
1377 | ||
e02ba72a AP |
1378 | /* Pass requests_done to kill_ioctx() where it can be set |
1379 | * in a thread-safe way. If we try to set it here then we have | |
1380 | * a race condition if two io_destroy() calls run simultaneously.
1381 | */ | |
dc48e56d | 1382 | ret = kill_ioctx(current->mm, ioctx, &wait); |
723be6e3 | 1383 | percpu_ref_put(&ioctx->users); |
e02ba72a AP |
1384 | |
1385 | /* Wait until all IO for the context are done. Otherwise the kernel
1386 | * keeps using user-space buffers even if the user thinks the context
1387 | * is destroyed. | |
1388 | */ | |
fb2d4483 | 1389 | if (!ret) |
dc48e56d | 1390 | wait_for_completion(&wait.comp); |
e02ba72a | 1391 | |
fb2d4483 | 1392 | return ret; |
1da177e4 | 1393 | } |
acd88d4e | 1394 | pr_debug("EINVAL: invalid context id\n"); |
1da177e4 LT |
1395 | return -EINVAL; |
1396 | } | |
1397 | ||
3c96c7f4 AV |
1398 | static void aio_remove_iocb(struct aio_kiocb *iocb) |
1399 | { | |
1400 | struct kioctx *ctx = iocb->ki_ctx; | |
1401 | unsigned long flags; | |
1402 | ||
1403 | spin_lock_irqsave(&ctx->ctx_lock, flags); | |
1404 | list_del(&iocb->ki_list); | |
1405 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); | |
1406 | } | |
1407 | ||
54843f87 CH |
1408 | static void aio_complete_rw(struct kiocb *kiocb, long res, long res2) |
1409 | { | |
1410 | struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw); | |
1411 | ||
3c96c7f4 AV |
1412 | if (!list_empty_careful(&iocb->ki_list)) |
1413 | aio_remove_iocb(iocb); | |
1414 | ||
54843f87 CH |
1415 | if (kiocb->ki_flags & IOCB_WRITE) { |
1416 | struct inode *inode = file_inode(kiocb->ki_filp); | |
1417 | ||
1418 | /* | |
1419 | * Tell lockdep we inherited freeze protection from submission | |
1420 | * thread. | |
1421 | */ | |
1422 | if (S_ISREG(inode->i_mode)) | |
1423 | __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); | |
1424 | file_end_write(kiocb->ki_filp); | |
1425 | } | |
1426 | ||
1427 | fput(kiocb->ki_filp); | |
1428 | aio_complete(iocb, res, res2); | |
1429 | } | |
1430 | ||
88a6f18b | 1431 | static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) |
54843f87 CH |
1432 | { |
1433 | int ret; | |
1434 | ||
1435 | req->ki_filp = fget(iocb->aio_fildes); | |
1436 | if (unlikely(!req->ki_filp)) | |
1437 | return -EBADF; | |
1438 | req->ki_complete = aio_complete_rw; | |
1439 | req->ki_pos = iocb->aio_offset; | |
1440 | req->ki_flags = iocb_flags(req->ki_filp); | |
1441 | if (iocb->aio_flags & IOCB_FLAG_RESFD) | |
1442 | req->ki_flags |= IOCB_EVENTFD; | |
fc28724d | 1443 | req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp)); |
d9a08a9e AM |
1444 | if (iocb->aio_flags & IOCB_FLAG_IOPRIO) { |
1445 | /* | |
1446 | * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then | |
1447 | * aio_reqprio is interpreted as an I/O scheduling | |
1448 | * class and priority. | |
1449 | */ | |
1450 | ret = ioprio_check_cap(iocb->aio_reqprio); | |
1451 | if (ret) { | |
9a6d9a62 | 1452 | pr_debug("aio ioprio check cap error: %d\n", ret); |
154989e4 | 1453 | goto out_fput; |
d9a08a9e AM |
1454 | } |
1455 | ||
1456 | req->ki_ioprio = iocb->aio_reqprio; | |
1457 | } else | |
76dc8913 | 1458 | req->ki_ioprio = get_current_ioprio(); |
d9a08a9e | 1459 | |
54843f87 CH |
1460 | ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags); |
1461 | if (unlikely(ret)) | |
154989e4 CH |
1462 | goto out_fput; |
1463 | ||
1464 | req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */ | |
1465 | return 0; | |
1466 | ||
1467 | out_fput: | |
1468 | fput(req->ki_filp); | |
54843f87 CH |
1469 | return ret; |
1470 | } | |
1471 | ||
88a6f18b | 1472 | static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec, |
89319d31 | 1473 | bool vectored, bool compat, struct iov_iter *iter) |
eed4e51f | 1474 | { |
89319d31 CH |
1475 | void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; |
1476 | size_t len = iocb->aio_nbytes; | |
1477 | ||
1478 | if (!vectored) { | |
1479 | ssize_t ret = import_single_range(rw, buf, len, *iovec, iter); | |
1480 | *iovec = NULL; | |
1481 | return ret; | |
1482 | } | |
9d85cba7 JM |
1483 | #ifdef CONFIG_COMPAT |
1484 | if (compat) | |
89319d31 CH |
1485 | return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec, |
1486 | iter); | |
9d85cba7 | 1487 | #endif |
89319d31 | 1488 | return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter); |
eed4e51f BP |
1489 | } |
1490 | ||
9061d14a | 1491 | static inline void aio_rw_done(struct kiocb *req, ssize_t ret) |
89319d31 CH |
1492 | { |
1493 | switch (ret) { | |
1494 | case -EIOCBQUEUED: | |
9061d14a | 1495 | break; |
89319d31 CH |
1496 | case -ERESTARTSYS: |
1497 | case -ERESTARTNOINTR: | |
1498 | case -ERESTARTNOHAND: | |
1499 | case -ERESTART_RESTARTBLOCK: | |
1500 | /* | |
1501 | * There's no easy way to restart the syscall since other AIOs | |
1502 | * may already be running. Just fail this IO with EINTR. | |
1503 | */ | |
1504 | ret = -EINTR; | |
1505 | /*FALLTHRU*/ | |
1506 | default: | |
bc9bff61 | 1507 | req->ki_complete(req, ret, 0); |
89319d31 CH |
1508 | } |
1509 | } | |
1510 | ||
88a6f18b JA |
1511 | static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, |
1512 | bool vectored, bool compat) | |
1da177e4 | 1513 | { |
00fefb9c | 1514 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
293bc982 | 1515 | struct iov_iter iter; |
54843f87 | 1516 | struct file *file; |
89319d31 | 1517 | ssize_t ret; |
1da177e4 | 1518 | |
54843f87 CH |
1519 | ret = aio_prep_rw(req, iocb); |
1520 | if (ret) | |
1521 | return ret; | |
1522 | file = req->ki_filp; | |
1523 | ||
1524 | ret = -EBADF; | |
89319d31 | 1525 | if (unlikely(!(file->f_mode & FMODE_READ))) |
54843f87 CH |
1526 | goto out_fput; |
1527 | ret = -EINVAL; | |
89319d31 | 1528 | if (unlikely(!file->f_op->read_iter)) |
54843f87 | 1529 | goto out_fput; |
73a7075e | 1530 | |
89319d31 CH |
1531 | ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); |
1532 | if (ret) | |
54843f87 | 1533 | goto out_fput; |
89319d31 CH |
1534 | ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); |
1535 | if (!ret) | |
9061d14a | 1536 | aio_rw_done(req, call_read_iter(file, req, &iter)); |
89319d31 | 1537 | kfree(iovec); |
54843f87 | 1538 | out_fput: |
9061d14a | 1539 | if (unlikely(ret)) |
54843f87 | 1540 | fput(file); |
89319d31 CH |
1541 | return ret; |
1542 | } | |
73a7075e | 1543 | |
88a6f18b JA |
1544 | static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, |
1545 | bool vectored, bool compat) | |
89319d31 | 1546 | { |
89319d31 CH |
1547 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
1548 | struct iov_iter iter; | |
54843f87 | 1549 | struct file *file; |
89319d31 | 1550 | ssize_t ret; |
41ef4eb8 | 1551 | |
54843f87 CH |
1552 | ret = aio_prep_rw(req, iocb); |
1553 | if (ret) | |
1554 | return ret; | |
1555 | file = req->ki_filp; | |
1556 | ||
1557 | ret = -EBADF; | |
89319d31 | 1558 | if (unlikely(!(file->f_mode & FMODE_WRITE))) |
54843f87 CH |
1559 | goto out_fput; |
1560 | ret = -EINVAL; | |
89319d31 | 1561 | if (unlikely(!file->f_op->write_iter)) |
54843f87 | 1562 | goto out_fput; |
1da177e4 | 1563 | |
89319d31 CH |
1564 | ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); |
1565 | if (ret) | |
54843f87 | 1566 | goto out_fput; |
89319d31 CH |
1567 | ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); |
1568 | if (!ret) { | |
70fe2f48 | 1569 | /* |
92ce4728 | 1570 | * Open-code file_start_write here to grab freeze protection, |
54843f87 CH |
1571 | * which will be released by another thread in |
1572 | * aio_complete_rw(). Fool lockdep by telling it the lock got | |
1573 | * released so that it doesn't complain about the held lock when | |
1574 | * we return to userspace. | |
70fe2f48 | 1575 | */ |
92ce4728 CH |
1576 | if (S_ISREG(file_inode(file)->i_mode)) { |
1577 | __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true); | |
a12f1ae6 | 1578 | __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); |
92ce4728 CH |
1579 | } |
1580 | req->ki_flags |= IOCB_WRITE; | |
9061d14a | 1581 | aio_rw_done(req, call_write_iter(file, req, &iter)); |
41ef4eb8 | 1582 | } |
89319d31 | 1583 | kfree(iovec); |
54843f87 | 1584 | out_fput: |
9061d14a | 1585 | if (unlikely(ret)) |
54843f87 | 1586 | fput(file); |
89319d31 | 1587 | return ret; |
1da177e4 LT |
1588 | } |
1589 | ||
a3c0d439 CH |
1590 | static void aio_fsync_work(struct work_struct *work) |
1591 | { | |
1592 | struct fsync_iocb *req = container_of(work, struct fsync_iocb, work); | |
1593 | int ret; | |
1594 | ||
1595 | ret = vfs_fsync(req->file, req->datasync); | |
1596 | fput(req->file); | |
1597 | aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0); | |
1598 | } | |
1599 | ||
88a6f18b JA |
1600 | static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, |
1601 | bool datasync) | |
a3c0d439 CH |
1602 | { |
1603 | if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes || | |
1604 | iocb->aio_rw_flags)) | |
1605 | return -EINVAL; | |
a11e1d43 | 1606 | |
a3c0d439 CH |
1607 | req->file = fget(iocb->aio_fildes); |
1608 | if (unlikely(!req->file)) | |
1609 | return -EBADF; | |
1610 | if (unlikely(!req->file->f_op->fsync)) { | |
1611 | fput(req->file); | |
1612 | return -EINVAL; | |
1613 | } | |
1614 | ||
1615 | req->datasync = datasync; | |
1616 | INIT_WORK(&req->work, aio_fsync_work); | |
1617 | schedule_work(&req->work); | |
9061d14a | 1618 | return 0; |
a3c0d439 CH |
1619 | } |
1620 | ||
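/*
 * Illustrative userspace sketch (not part of this file): an IOCB_CMD_FDSYNC
 * request as accepted by aio_fsync() above.  Only the opcode and the file
 * descriptor matter; aio_buf, aio_nbytes, aio_offset and aio_rw_flags must
 * be zero or the submission fails with -EINVAL.
 */
#include <linux/aio_abi.h>
#include <string.h>

static void fill_fdsync_iocb(struct iocb *cb, int fd)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_lio_opcode = IOCB_CMD_FDSYNC;	/* or IOCB_CMD_FSYNC for a full fsync */
	cb->aio_fildes     = fd;
}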
bfe4037e CH |
1621 | static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) |
1622 | { | |
1623 | struct file *file = iocb->poll.file; | |
1624 | ||
1625 | aio_complete(iocb, mangle_poll(mask), 0); | |
1626 | fput(file); | |
1627 | } | |
1628 | ||
1629 | static void aio_poll_complete_work(struct work_struct *work) | |
1630 | { | |
1631 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); | |
1632 | struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); | |
1633 | struct poll_table_struct pt = { ._key = req->events }; | |
1634 | struct kioctx *ctx = iocb->ki_ctx; | |
1635 | __poll_t mask = 0; | |
1636 | ||
1637 | if (!READ_ONCE(req->cancelled)) | |
1638 | mask = vfs_poll(req->file, &pt) & req->events; | |
1639 | ||
1640 | /* | |
1641 | * Note that ->ki_cancel callers also delete iocb from active_reqs after | |
1642 | * calling ->ki_cancel. We need the ctx_lock roundtrip here to | |
1643 | * synchronize with them. In the cancellation case the list_del_init | |
1644 | * itself is not actually needed, but harmless so we keep it in to | |
1645 | * avoid further branches in the fast path. | |
1646 | */ | |
1647 | spin_lock_irq(&ctx->ctx_lock); | |
1648 | if (!mask && !READ_ONCE(req->cancelled)) { | |
1649 | add_wait_queue(req->head, &req->wait); | |
1650 | spin_unlock_irq(&ctx->ctx_lock); | |
1651 | return; | |
1652 | } | |
1653 | list_del_init(&iocb->ki_list); | |
1654 | spin_unlock_irq(&ctx->ctx_lock); | |
1655 | ||
1656 | aio_poll_complete(iocb, mask); | |
1657 | } | |
1658 | ||
1659 | /* assumes we are called with irqs disabled */ | |
1660 | static int aio_poll_cancel(struct kiocb *iocb) | |
1661 | { | |
1662 | struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); | |
1663 | struct poll_iocb *req = &aiocb->poll; | |
1664 | ||
1665 | spin_lock(&req->head->lock); | |
1666 | WRITE_ONCE(req->cancelled, true); | |
1667 | if (!list_empty(&req->wait.entry)) { | |
1668 | list_del_init(&req->wait.entry); | |
1669 | schedule_work(&aiocb->poll.work); | |
1670 | } | |
1671 | spin_unlock(&req->head->lock); | |
1672 | ||
1673 | return 0; | |
1674 | } | |
1675 | ||
1676 | static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, | |
1677 | void *key) | |
1678 | { | |
1679 | struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); | |
e8693bcf | 1680 | struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); |
bfe4037e CH |
1681 | __poll_t mask = key_to_poll(key); |
1682 | ||
1683 | req->woken = true; | |
1684 | ||
1685 | /* for instances that support it, check for an event match first: */ | |
e8693bcf CH |
1686 | if (mask) { |
1687 | if (!(mask & req->events)) | |
1688 | return 0; | |
1689 | ||
1690 | /* try to complete the iocb inline if we can: */ | |
1691 | if (spin_trylock(&iocb->ki_ctx->ctx_lock)) { | |
1692 | list_del(&iocb->ki_list); | |
1693 | spin_unlock(&iocb->ki_ctx->ctx_lock); | |
1694 | ||
1695 | list_del_init(&req->wait.entry); | |
1696 | aio_poll_complete(iocb, mask); | |
1697 | return 1; | |
1698 | } | |
1699 | } | |
bfe4037e CH |
1700 | |
1701 | list_del_init(&req->wait.entry); | |
1702 | schedule_work(&req->work); | |
1703 | return 1; | |
1704 | } | |
1705 | ||
1706 | struct aio_poll_table { | |
1707 | struct poll_table_struct pt; | |
1708 | struct aio_kiocb *iocb; | |
1709 | int error; | |
1710 | }; | |
1711 | ||
1712 | static void | |
1713 | aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, | |
1714 | struct poll_table_struct *p) | |
1715 | { | |
1716 | struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); | |
1717 | ||
1718 | /* multiple wait queues per file are not supported */ | |
1719 | if (unlikely(pt->iocb->poll.head)) { | |
1720 | pt->error = -EINVAL; | |
1721 | return; | |
1722 | } | |
1723 | ||
1724 | pt->error = 0; | |
1725 | pt->iocb->poll.head = head; | |
1726 | add_wait_queue(head, &pt->iocb->poll.wait); | |
1727 | } | |
1728 | ||
88a6f18b | 1729 | static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) |
bfe4037e CH |
1730 | { |
1731 | struct kioctx *ctx = aiocb->ki_ctx; | |
1732 | struct poll_iocb *req = &aiocb->poll; | |
1733 | struct aio_poll_table apt; | |
1734 | __poll_t mask; | |
1735 | ||
1736 | /* reject any unknown events outside the normal event mask. */ | |
1737 | if ((u16)iocb->aio_buf != iocb->aio_buf) | |
1738 | return -EINVAL; | |
1739 | /* reject fields that are not defined for poll */ | |
1740 | if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) | |
1741 | return -EINVAL; | |
1742 | ||
1743 | INIT_WORK(&req->work, aio_poll_complete_work); | |
1744 | req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; | |
1745 | req->file = fget(iocb->aio_fildes); | |
1746 | if (unlikely(!req->file)) | |
1747 | return -EBADF; | |
1748 | ||
2bc4ca9b JA |
1749 | req->head = NULL; |
1750 | req->woken = false; | |
1751 | req->cancelled = false; | |
1752 | ||
bfe4037e CH |
1753 | apt.pt._qproc = aio_poll_queue_proc; |
1754 | apt.pt._key = req->events; | |
1755 | apt.iocb = aiocb; | |
1756 | apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ | |
1757 | ||
1758 | /* initialize the list so that we can do list_empty checks */ | |
1759 | INIT_LIST_HEAD(&req->wait.entry); | |
1760 | init_waitqueue_func_entry(&req->wait, aio_poll_wake); | |
1761 | ||
1762 | /* one for removal from waitqueue, one for this function */ | |
1763 | refcount_set(&aiocb->ki_refcnt, 2); | |
1764 | ||
1765 | mask = vfs_poll(req->file, &apt.pt) & req->events; | |
1766 | if (unlikely(!req->head)) { | |
1767 | /* we did not manage to set up a waitqueue, done */ | |
1768 | goto out; | |
1769 | } | |
1770 | ||
1771 | spin_lock_irq(&ctx->ctx_lock); | |
1772 | spin_lock(&req->head->lock); | |
1773 | if (req->woken) { | |
1774 | /* wake_up context handles the rest */ | |
1775 | mask = 0; | |
1776 | apt.error = 0; | |
1777 | } else if (mask || apt.error) { | |
1778 | /* if we get an error or a mask we are done */ | |
1779 | WARN_ON_ONCE(list_empty(&req->wait.entry)); | |
1780 | list_del_init(&req->wait.entry); | |
1781 | } else { | |
1782 | /* actually waiting for an event */ | |
1783 | list_add_tail(&aiocb->ki_list, &ctx->active_reqs); | |
1784 | aiocb->ki_cancel = aio_poll_cancel; | |
1785 | } | |
1786 | spin_unlock(&req->head->lock); | |
1787 | spin_unlock_irq(&ctx->ctx_lock); | |
1788 | ||
1789 | out: | |
1790 | if (unlikely(apt.error)) { | |
1791 | fput(req->file); | |
1792 | return apt.error; | |
1793 | } | |
1794 | ||
1795 | if (mask) | |
1796 | aio_poll_complete(aiocb, mask); | |
1797 | iocb_put(aiocb); | |
1798 | return 0; | |
1799 | } | |
1800 | ||
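/*
 * Illustrative userspace sketch (not part of this file): a one-shot
 * IOCB_CMD_POLL request as parsed by aio_poll() above.  The requested
 * event mask is carried in the low 16 bits of aio_buf; EPOLLERR and
 * EPOLLHUP are always reported.  The returned mask arrives in the res
 * field of the completion event.
 */
#include <linux/aio_abi.h>
#include <poll.h>
#include <string.h>

static void fill_poll_iocb(struct iocb *cb, int fd)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_lio_opcode = IOCB_CMD_POLL;
	cb->aio_fildes     = fd;
	cb->aio_buf        = POLLIN;	/* events to wait for */
}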
88a6f18b JA |
1801 | static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, |
1802 | struct iocb __user *user_iocb, bool compat) | |
1da177e4 | 1803 | { |
04b2fa9f | 1804 | struct aio_kiocb *req; |
1da177e4 LT |
1805 | ssize_t ret; |
1806 | ||
1807 | /* enforce forwards compatibility on users */ | |
88a6f18b | 1808 | if (unlikely(iocb->aio_reserved2)) { |
caf4167a | 1809 | pr_debug("EINVAL: reserve field set\n"); |
1da177e4 LT |
1810 | return -EINVAL; |
1811 | } | |
1812 | ||
1813 | /* prevent overflows */ | |
1814 | if (unlikely( | |
88a6f18b JA |
1815 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || |
1816 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | |
1817 | ((ssize_t)iocb->aio_nbytes < 0) | |
1da177e4 | 1818 | )) { |
acd88d4e | 1819 | pr_debug("EINVAL: overflow check\n"); |
1da177e4 LT |
1820 | return -EINVAL; |
1821 | } | |
1822 | ||
432c7997 CH |
1823 | if (!get_reqs_available(ctx)) |
1824 | return -EAGAIN; | |
1825 | ||
1826 | ret = -EAGAIN; | |
41ef4eb8 | 1827 | req = aio_get_req(ctx); |
1d98ebfc | 1828 | if (unlikely(!req)) |
432c7997 | 1829 | goto out_put_reqs_available; |
1d98ebfc | 1830 | |
88a6f18b | 1831 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
9c3060be DL |
1832 | /* |
1833 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | |
1834 | * instance of the file* now. The file descriptor must be | |
1835 | * an eventfd() fd, and will be signaled for each completed | |
1836 | * event using the eventfd_signal() function. | |
1837 | */ | |
88a6f18b | 1838 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
801678c5 | 1839 | if (IS_ERR(req->ki_eventfd)) { |
9c3060be | 1840 | ret = PTR_ERR(req->ki_eventfd); |
87c3a86e | 1841 | req->ki_eventfd = NULL; |
9c3060be DL |
1842 | goto out_put_req; |
1843 | } | |
9830f4be GR |
1844 | } |
1845 | ||
8a660890 | 1846 | ret = put_user(KIOCB_KEY, &user_iocb->aio_key); |
1da177e4 | 1847 | if (unlikely(ret)) { |
caf4167a | 1848 | pr_debug("EFAULT: aio_key\n"); |
1da177e4 LT |
1849 | goto out_put_req; |
1850 | } | |
1851 | ||
04b2fa9f | 1852 | req->ki_user_iocb = user_iocb; |
88a6f18b | 1853 | req->ki_user_data = iocb->aio_data; |
1da177e4 | 1854 | |
88a6f18b | 1855 | switch (iocb->aio_lio_opcode) { |
89319d31 | 1856 | case IOCB_CMD_PREAD: |
88a6f18b | 1857 | ret = aio_read(&req->rw, iocb, false, compat); |
89319d31 CH |
1858 | break; |
1859 | case IOCB_CMD_PWRITE: | |
88a6f18b | 1860 | ret = aio_write(&req->rw, iocb, false, compat); |
89319d31 CH |
1861 | break; |
1862 | case IOCB_CMD_PREADV: | |
88a6f18b | 1863 | ret = aio_read(&req->rw, iocb, true, compat); |
89319d31 CH |
1864 | break; |
1865 | case IOCB_CMD_PWRITEV: | |
88a6f18b | 1866 | ret = aio_write(&req->rw, iocb, true, compat); |
89319d31 | 1867 | break; |
a3c0d439 | 1868 | case IOCB_CMD_FSYNC: |
88a6f18b | 1869 | ret = aio_fsync(&req->fsync, iocb, false); |
a3c0d439 CH |
1870 | break; |
1871 | case IOCB_CMD_FDSYNC: | |
88a6f18b | 1872 | ret = aio_fsync(&req->fsync, iocb, true); |
ac060cba | 1873 | break; |
bfe4037e | 1874 | case IOCB_CMD_POLL: |
88a6f18b | 1875 | ret = aio_poll(req, iocb); |
bfe4037e | 1876 | break; |
89319d31 | 1877 | default: |
88a6f18b | 1878 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); |
89319d31 CH |
1879 | ret = -EINVAL; |
1880 | break; | |
1881 | } | |
41003a7b | 1882 | |
92ce4728 | 1883 | /* |
9061d14a AV |
1884 | * If ret is 0, we'd either done aio_complete() ourselves or have |
1885 | * arranged for that to be done asynchronously. Anything non-zero | |
1886 | * means that we need to destroy req ourselves. | |
92ce4728 | 1887 | */ |
9061d14a | 1888 | if (ret) |
89319d31 | 1889 | goto out_put_req; |
1da177e4 | 1890 | return 0; |
1da177e4 | 1891 | out_put_req: |
54843f87 CH |
1892 | if (req->ki_eventfd) |
1893 | eventfd_ctx_put(req->ki_eventfd); | |
71ebc6fe | 1894 | iocb_put(req); |
432c7997 CH |
1895 | out_put_reqs_available: |
1896 | put_reqs_available(ctx, 1); | |
1da177e4 LT |
1897 | return ret; |
1898 | } | |
1899 | ||
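/*
 * Illustrative userspace sketch (not part of this file): pairing an iocb
 * with an eventfd through IOCB_FLAG_RESFD, as handled in __io_submit_one()
 * above.  Each completion of the iocb adds one to the eventfd counter, so
 * the eventfd can sit in a poll/epoll set to signal that io_getevents()
 * will not block.
 */
#include <linux/aio_abi.h>
#include <sys/eventfd.h>

static int attach_resfd(struct iocb *cb)
{
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	cb->aio_flags |= IOCB_FLAG_RESFD;
	cb->aio_resfd  = efd;
	return efd;
}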
88a6f18b JA |
1900 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
1901 | bool compat) | |
1902 | { | |
1903 | struct iocb iocb; | |
1904 | ||
1905 | if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) | |
1906 | return -EFAULT; | |
1907 | ||
1908 | return __io_submit_one(ctx, &iocb, user_iocb, compat); | |
1909 | } | |
1910 | ||
67ba049f AV |
1911 | /* sys_io_submit: |
1912 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns | |
1913 | * the number of iocbs queued. May return -EINVAL if the aio_context | |
1914 | * specified by ctx_id is invalid, if nr is < 0, if the iocb at | |
1915 | * *iocbpp[0] is not properly initialized, if the operation specified | |
1916 | * is invalid for the file descriptor in the iocb. May fail with | |
1917 | * -EFAULT if any of the data structures point to invalid data. May | |
1918 | * fail with -EBADF if the file descriptor specified in the first | |
1919 | * iocb is invalid. May fail with -EAGAIN if insufficient resources | |
1920 | * are available to queue any iocbs. Will return 0 if nr is 0. Will | |
1921 | * fail with -ENOSYS if not implemented. | |
1922 | */ | |
1923 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, | |
1924 | struct iocb __user * __user *, iocbpp) | |
1da177e4 LT |
1925 | { |
1926 | struct kioctx *ctx; | |
1927 | long ret = 0; | |
080d676d | 1928 | int i = 0; |
9f5b9425 | 1929 | struct blk_plug plug; |
1da177e4 LT |
1930 | |
1931 | if (unlikely(nr < 0)) | |
1932 | return -EINVAL; | |
1933 | ||
1da177e4 LT |
1934 | ctx = lookup_ioctx(ctx_id); |
1935 | if (unlikely(!ctx)) { | |
caf4167a | 1936 | pr_debug("EINVAL: invalid context id\n"); |
1da177e4 LT |
1937 | return -EINVAL; |
1938 | } | |
1939 | ||
1da92779 AV |
1940 | if (nr > ctx->nr_events) |
1941 | nr = ctx->nr_events; | |
1942 | ||
a79d40e9 JA |
1943 | if (nr > AIO_PLUG_THRESHOLD) |
1944 | blk_start_plug(&plug); | |
67ba049f | 1945 | for (i = 0; i < nr; i++) { |
1da177e4 | 1946 | struct iocb __user *user_iocb; |
1da177e4 | 1947 | |
67ba049f | 1948 | if (unlikely(get_user(user_iocb, iocbpp + i))) { |
1da177e4 LT |
1949 | ret = -EFAULT; |
1950 | break; | |
1951 | } | |
1952 | ||
67ba049f | 1953 | ret = io_submit_one(ctx, user_iocb, false); |
1da177e4 LT |
1954 | if (ret) |
1955 | break; | |
1956 | } | |
a79d40e9 JA |
1957 | if (nr > AIO_PLUG_THRESHOLD) |
1958 | blk_finish_plug(&plug); | |
1da177e4 | 1959 | |
723be6e3 | 1960 | percpu_ref_put(&ctx->users); |
1da177e4 LT |
1961 | return i ? i : ret; |
1962 | } | |
1963 | ||
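/*
 * Minimal standalone userspace sketch (not part of this file) of the
 * submission path documented above: io_setup() creates a context and
 * io_submit() queues a single IOCB_CMD_PREAD.  Raw syscalls are used
 * because glibc does not wrap these; error handling is trimmed down.
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static long submit_one_pread(int fd, void *buf, size_t len)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	long ret;

	if (syscall(SYS_io_setup, 128, &ctx) < 0)	/* room for 128 events */
		return -1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes     = fd;
	cb.aio_buf        = (unsigned long)buf;
	cb.aio_nbytes     = len;
	cb.aio_offset     = 0;

	ret = syscall(SYS_io_submit, ctx, 1, cbs);	/* number of iocbs queued */
	/* a real caller would reap completions with io_getevents() before
	 * tearing the context down; io_destroy() cancels or waits for
	 * whatever is still outstanding. */
	syscall(SYS_io_destroy, ctx);
	return ret;
}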
c00d2c7e | 1964 | #ifdef CONFIG_COMPAT |
c00d2c7e | 1965 | COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id, |
67ba049f | 1966 | int, nr, compat_uptr_t __user *, iocbpp) |
c00d2c7e | 1967 | { |
67ba049f AV |
1968 | struct kioctx *ctx; |
1969 | long ret = 0; | |
1970 | int i = 0; | |
1971 | struct blk_plug plug; | |
c00d2c7e AV |
1972 | |
1973 | if (unlikely(nr < 0)) | |
1974 | return -EINVAL; | |
1975 | ||
67ba049f AV |
1976 | ctx = lookup_ioctx(ctx_id); |
1977 | if (unlikely(!ctx)) { | |
1978 | pr_debug("EINVAL: invalid context id\n"); | |
1979 | return -EINVAL; | |
1980 | } | |
1981 | ||
1da92779 AV |
1982 | if (nr > ctx->nr_events) |
1983 | nr = ctx->nr_events; | |
1984 | ||
a79d40e9 JA |
1985 | if (nr > AIO_PLUG_THRESHOLD) |
1986 | blk_start_plug(&plug); | |
67ba049f AV |
1987 | for (i = 0; i < nr; i++) { |
1988 | compat_uptr_t user_iocb; | |
1989 | ||
1990 | if (unlikely(get_user(user_iocb, iocbpp + i))) { | |
1991 | ret = -EFAULT; | |
1992 | break; | |
1993 | } | |
1994 | ||
1995 | ret = io_submit_one(ctx, compat_ptr(user_iocb), true); | |
1996 | if (ret) | |
1997 | break; | |
1998 | } | |
a79d40e9 JA |
1999 | if (nr > AIO_PLUG_THRESHOLD) |
2000 | blk_finish_plug(&plug); | |
67ba049f AV |
2001 | |
2002 | percpu_ref_put(&ctx->users); | |
2003 | return i ? i : ret; | |
c00d2c7e AV |
2004 | } |
2005 | #endif | |
2006 | ||
1da177e4 LT |
2007 | /* lookup_kiocb |
2008 | * Finds a given iocb for cancellation. | |
1da177e4 | 2009 | */ |
04b2fa9f | 2010 | static struct aio_kiocb * |
f3a2752a | 2011 | lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb) |
1da177e4 | 2012 | { |
04b2fa9f | 2013 | struct aio_kiocb *kiocb; |
d00689af ZB |
2014 | |
2015 | assert_spin_locked(&ctx->ctx_lock); | |
2016 | ||
1da177e4 | 2017 | /* TODO: use a hash or array, this sucks. */ |
04b2fa9f CH |
2018 | list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { |
2019 | if (kiocb->ki_user_iocb == iocb) | |
1da177e4 LT |
2020 | return kiocb; |
2021 | } | |
2022 | return NULL; | |
2023 | } | |
2024 | ||
2025 | /* sys_io_cancel: | |
2026 | * Attempts to cancel an iocb previously passed to io_submit. If | |
2027 | * the cancellation is successfully started, the resulting event is | |
2028 | * delivered via the completion ring rather than being copied into the | |
2029 | * memory pointed to by result, and -EINPROGRESS is returned. May fail with | |
2030 | * -EFAULT if any of the data structures pointed to are invalid. | |
2031 | * May fail with -EINVAL if aio_context specified by ctx_id is | |
2032 | * invalid. May fail with -EAGAIN if the iocb specified was not | |
2033 | * cancelled. Will fail with -ENOSYS if not implemented. | |
2034 | */ | |
002c8976 HC |
2035 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
2036 | struct io_event __user *, result) | |
1da177e4 | 2037 | { |
1da177e4 | 2038 | struct kioctx *ctx; |
04b2fa9f | 2039 | struct aio_kiocb *kiocb; |
888933f8 | 2040 | int ret = -EINVAL; |
1da177e4 | 2041 | u32 key; |
1da177e4 | 2042 | |
f3a2752a | 2043 | if (unlikely(get_user(key, &iocb->aio_key))) |
1da177e4 | 2044 | return -EFAULT; |
f3a2752a CH |
2045 | if (unlikely(key != KIOCB_KEY)) |
2046 | return -EINVAL; | |
1da177e4 LT |
2047 | |
2048 | ctx = lookup_ioctx(ctx_id); | |
2049 | if (unlikely(!ctx)) | |
2050 | return -EINVAL; | |
2051 | ||
2052 | spin_lock_irq(&ctx->ctx_lock); | |
f3a2752a | 2053 | kiocb = lookup_kiocb(ctx, iocb); |
888933f8 CH |
2054 | if (kiocb) { |
2055 | ret = kiocb->ki_cancel(&kiocb->rw); | |
2056 | list_del_init(&kiocb->ki_list); | |
2057 | } | |
1da177e4 LT |
2058 | spin_unlock_irq(&ctx->ctx_lock); |
2059 | ||
906b973c | 2060 | if (!ret) { |
bec68faa KO |
2061 | /* |
2062 | * The result argument is no longer used - the io_event is | |
2063 | * always delivered via the ring buffer. -EINPROGRESS indicates | |
2064 | * cancellation is in progress: | |
906b973c | 2065 | */ |
bec68faa | 2066 | ret = -EINPROGRESS; |
906b973c | 2067 | } |
1da177e4 | 2068 | |
723be6e3 | 2069 | percpu_ref_put(&ctx->users); |
1da177e4 LT |
2070 | |
2071 | return ret; | |
2072 | } | |
2073 | ||
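/*
 * Illustrative userspace sketch (not part of this file): cancelling a
 * previously submitted iocb.  As noted in the code above, the result
 * argument is no longer filled in; a successfully started cancellation
 * returns -EINPROGRESS and the final event arrives via the ring buffer.
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

static long cancel_iocb(aio_context_t ctx, struct iocb *cb)
{
	struct io_event unused;		/* required by the ABI, left untouched */

	return syscall(SYS_io_cancel, ctx, cb, &unused);
}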
fa2e62a5 DD |
2074 | static long do_io_getevents(aio_context_t ctx_id, |
2075 | long min_nr, | |
2076 | long nr, | |
2077 | struct io_event __user *events, | |
2078 | struct timespec64 *ts) | |
2079 | { | |
2080 | ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX; | |
2081 | struct kioctx *ioctx = lookup_ioctx(ctx_id); | |
2082 | long ret = -EINVAL; | |
2083 | ||
2084 | if (likely(ioctx)) { | |
2085 | if (likely(min_nr <= nr && min_nr >= 0)) | |
2086 | ret = read_events(ioctx, min_nr, nr, events, until); | |
2087 | percpu_ref_put(&ioctx->users); | |
2088 | } | |
2089 | ||
2090 | return ret; | |
2091 | } | |
2092 | ||
1da177e4 LT |
2093 | /* io_getevents: |
2094 | * Attempts to read at least min_nr events and up to nr events from | |
642b5123 ST |
2095 | * the completion queue for the aio_context specified by ctx_id. If |
2096 | * it succeeds, the number of read events is returned. May fail with | |
2097 | * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is | |
2098 | * out of range, if timeout is out of range. May fail with -EFAULT | |
2099 | * if any of the memory specified is invalid. May return 0 or | |
2100 | * < min_nr if the timeout specified by timeout has elapsed | |
2101 | * before sufficient events are available, where timeout == NULL | |
2102 | * specifies an infinite timeout. Note that the timeout pointed to by | |
6900807c | 2103 | * timeout is relative. Will fail with -ENOSYS if not implemented. |
1da177e4 | 2104 | */ |
7a35397f DD |
2105 | #if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT) |
2106 | ||
002c8976 HC |
2107 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
2108 | long, min_nr, | |
2109 | long, nr, | |
2110 | struct io_event __user *, events, | |
7a35397f | 2111 | struct __kernel_timespec __user *, timeout) |
1da177e4 | 2112 | { |
fa2e62a5 | 2113 | struct timespec64 ts; |
7a074e96 CH |
2114 | int ret; |
2115 | ||
2116 | if (timeout && unlikely(get_timespec64(&ts, timeout))) | |
2117 | return -EFAULT; | |
2118 | ||
2119 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); | |
2120 | if (!ret && signal_pending(current)) | |
2121 | ret = -EINTR; | |
2122 | return ret; | |
2123 | } | |
1da177e4 | 2124 | |
7a35397f DD |
2125 | #endif |
2126 | ||
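/*
 * Illustrative userspace sketch (not part of this file): reaping completion
 * events with a relative timeout, matching do_io_getevents() above.  A NULL
 * timeout waits indefinitely for at least min_nr events.
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>

static long reap_events(aio_context_t ctx, struct io_event *evs, long nr)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };	/* wait at most 1s */

	return syscall(SYS_io_getevents, ctx, 1L, nr, evs, &ts);
}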
9ba546c0 CH |
2127 | struct __aio_sigset { |
2128 | const sigset_t __user *sigmask; | |
2129 | size_t sigsetsize; | |
2130 | }; | |
2131 | ||
7a074e96 CH |
2132 | SYSCALL_DEFINE6(io_pgetevents, |
2133 | aio_context_t, ctx_id, | |
2134 | long, min_nr, | |
2135 | long, nr, | |
2136 | struct io_event __user *, events, | |
7a35397f | 2137 | struct __kernel_timespec __user *, timeout, |
7a074e96 CH |
2138 | const struct __aio_sigset __user *, usig) |
2139 | { | |
2140 | struct __aio_sigset ksig = { NULL, }; | |
2141 | sigset_t ksigmask, sigsaved; | |
2142 | struct timespec64 ts; | |
2143 | int ret; | |
2144 | ||
2145 | if (timeout && unlikely(get_timespec64(&ts, timeout))) | |
2146 | return -EFAULT; | |
2147 | ||
2148 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) | |
2149 | return -EFAULT; | |
2150 | ||
7a35397f DD |
2151 | ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize); |
2152 | if (ret) | |
2153 | return ret; | |
7a074e96 CH |
2154 | |
2155 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); | |
7a35397f DD |
2156 | restore_user_sigmask(ksig.sigmask, &sigsaved); |
2157 | if (signal_pending(current) && !ret) | |
2158 | ret = -ERESTARTNOHAND; | |
7a074e96 | 2159 | |
7a35397f DD |
2160 | return ret; |
2161 | } | |
2162 | ||
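/*
 * Illustrative userspace sketch (not part of this file): io_pgetevents()
 * with a temporary signal mask.  The last argument mirrors the kernel's
 * struct __aio_sigset above; sigsetsize must be the kernel sigset size
 * (_NSIG / 8, i.e. 8 bytes on x86-64), not glibc's sizeof(sigset_t).
 * SYS_io_pgetevents is assumed to be exported by the headers (Linux 4.18+).
 */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>
#include <stddef.h>

struct aio_sigset_arg {			/* userspace mirror of __aio_sigset */
	const sigset_t *sigmask;
	size_t sigsetsize;
};

static long reap_events_masked(aio_context_t ctx, struct io_event *evs,
			       long nr, const sigset_t *mask)
{
	struct aio_sigset_arg sig = { mask, 8 };	/* kernel sigset size */

	return syscall(SYS_io_pgetevents, ctx, 1L, nr, evs, NULL, &sig);
}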
2163 | #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT) | |
2164 | ||
2165 | SYSCALL_DEFINE6(io_pgetevents_time32, | |
2166 | aio_context_t, ctx_id, | |
2167 | long, min_nr, | |
2168 | long, nr, | |
2169 | struct io_event __user *, events, | |
2170 | struct old_timespec32 __user *, timeout, | |
2171 | const struct __aio_sigset __user *, usig) | |
2172 | { | |
2173 | struct __aio_sigset ksig = { NULL, }; | |
2174 | sigset_t ksigmask, sigsaved; | |
2175 | struct timespec64 ts; | |
2176 | int ret; | |
2177 | ||
2178 | if (timeout && unlikely(get_old_timespec32(&ts, timeout))) | |
2179 | return -EFAULT; | |
2180 | ||
2181 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) | |
2182 | return -EFAULT; | |
2183 | ||
ded653cc DD |
2184 | |
2185 | ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize); | |
2186 | if (ret) | |
2187 | return ret; | |
7a074e96 CH |
2188 | |
2189 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); | |
854a6ed5 DD |
2190 | restore_user_sigmask(ksig.sigmask, &sigsaved); |
2191 | if (signal_pending(current) && !ret) | |
2192 | ret = -ERESTARTNOHAND; | |
fa2e62a5 | 2193 | |
7a074e96 | 2194 | return ret; |
1da177e4 | 2195 | } |
c00d2c7e | 2196 | |
7a35397f DD |
2197 | #endif |
2198 | ||
2199 | #if defined(CONFIG_COMPAT_32BIT_TIME) | |
2200 | ||
c00d2c7e AV |
2201 | COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id, |
2202 | compat_long_t, min_nr, | |
2203 | compat_long_t, nr, | |
2204 | struct io_event __user *, events, | |
9afc5eee | 2205 | struct old_timespec32 __user *, timeout) |
c00d2c7e | 2206 | { |
fa2e62a5 | 2207 | struct timespec64 t; |
7a074e96 CH |
2208 | int ret; |
2209 | ||
9afc5eee | 2210 | if (timeout && get_old_timespec32(&t, timeout)) |
7a074e96 CH |
2211 | return -EFAULT; |
2212 | ||
2213 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); | |
2214 | if (!ret && signal_pending(current)) | |
2215 | ret = -EINTR; | |
2216 | return ret; | |
2217 | } | |
2218 | ||
7a35397f DD |
2219 | #endif |
2220 | ||
2221 | #ifdef CONFIG_COMPAT | |
c00d2c7e | 2222 | |
7a074e96 CH |
2223 | struct __compat_aio_sigset { |
2224 | compat_sigset_t __user *sigmask; | |
2225 | compat_size_t sigsetsize; | |
2226 | }; | |
2227 | ||
7a35397f DD |
2228 | #if defined(CONFIG_COMPAT_32BIT_TIME) |
2229 | ||
7a074e96 CH |
2230 | COMPAT_SYSCALL_DEFINE6(io_pgetevents, |
2231 | compat_aio_context_t, ctx_id, | |
2232 | compat_long_t, min_nr, | |
2233 | compat_long_t, nr, | |
2234 | struct io_event __user *, events, | |
9afc5eee | 2235 | struct old_timespec32 __user *, timeout, |
7a074e96 CH |
2236 | const struct __compat_aio_sigset __user *, usig) |
2237 | { | |
2238 | struct __compat_aio_sigset ksig = { NULL, }; | |
2239 | sigset_t ksigmask, sigsaved; | |
2240 | struct timespec64 t; | |
2241 | int ret; | |
2242 | ||
9afc5eee | 2243 | if (timeout && get_old_timespec32(&t, timeout)) |
7a074e96 CH |
2244 | return -EFAULT; |
2245 | ||
2246 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) | |
2247 | return -EFAULT; | |
2248 | ||
ded653cc DD |
2249 | ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize); |
2250 | if (ret) | |
2251 | return ret; | |
c00d2c7e | 2252 | |
7a074e96 | 2253 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); |
854a6ed5 DD |
2254 | restore_user_sigmask(ksig.sigmask, &sigsaved); |
2255 | if (signal_pending(current) && !ret) | |
2256 | ret = -ERESTARTNOHAND; | |
fa2e62a5 | 2257 | |
7a074e96 | 2258 | return ret; |
c00d2c7e | 2259 | } |
7a35397f DD |
2260 | |
2261 | #endif | |
2262 | ||
2263 | COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, | |
2264 | compat_aio_context_t, ctx_id, | |
2265 | compat_long_t, min_nr, | |
2266 | compat_long_t, nr, | |
2267 | struct io_event __user *, events, | |
2268 | struct __kernel_timespec __user *, timeout, | |
2269 | const struct __compat_aio_sigset __user *, usig) | |
2270 | { | |
2271 | struct __compat_aio_sigset ksig = { NULL, }; | |
2272 | sigset_t ksigmask, sigsaved; | |
2273 | struct timespec64 t; | |
2274 | int ret; | |
2275 | ||
2276 | if (timeout && get_timespec64(&t, timeout)) | |
2277 | return -EFAULT; | |
2278 | ||
2279 | if (usig && copy_from_user(&ksig, usig, sizeof(ksig))) | |
2280 | return -EFAULT; | |
2281 | ||
2282 | ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize); | |
2283 | if (ret) | |
2284 | return ret; | |
2285 | ||
2286 | ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); | |
2287 | restore_user_sigmask(ksig.sigmask, &sigsaved); | |
2288 | if (signal_pending(current) && !ret) | |
2289 | ret = -ERESTARTNOHAND; | |
fa2e62a5 | 2290 | |
7a074e96 | 2291 | return ret; |
c00d2c7e AV |
2292 | } |
2293 | #endif |