/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <[email protected]>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <linux/uaccess.h>

#include "internal.h"

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define AIO_RING_PAGES	8
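
/*
 * Illustrative sketch, not part of this file: aio_setup_ring() maps this
 * ring into the creating process and io_setup() hands back the mapping's
 * address as the aio_context_t, so userspace can reap completions without
 * a syscall by walking io_events[] between head and tail itself (this is
 * what libaio's io_getevents() fast path does).  The mirrored struct below
 * is an assumption and must match the kernel layout above.
 */
#if 0
#include <libaio.h>	/* io_context_t, struct io_event */

/* Userspace mirror of struct aio_ring (assumed layout, see above). */
struct user_aio_ring {
	unsigned id, nr, head, tail;
	unsigned magic, compat_features, incompat_features, header_length;
	struct io_event io_events[];
};

/* Try to pop one completed event straight out of the mapped ring;
 * returns 1 on success, 0 if empty or if the layout is unrecognized. */
static int user_reap_one(io_context_t ctx, struct io_event *ev)
{
	struct user_aio_ring *ring = (struct user_aio_ring *)ctx;

	if (ring->magic != 0xa10a10a1 || ring->incompat_features != 0)
		return 0;	/* unknown layout: fall back to io_getevents() */
	if (ring->head == ring->tail)
		return 0;	/* no completions pending */

	*ev = ring->io_events[ring->head % ring->nr];
	__sync_synchronize();	/* copy the event before releasing the slot */
	ring->head = (ring->head + 1) % ring->nr;
	return 1;
}
#endif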

struct kioctx_table {
	struct rcu_head	rcu;
	unsigned	nr;
	struct kioctx	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct work_struct	free_work;

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

/*
 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
 * cancelled or completed (this makes a certain amount of sense because
 * successful cancellation - io_cancel() - does deliver the completion to
 * userspace).
 *
 * And since most things don't implement kiocb cancellation and we'd really like
 * kiocb completion to be lockless when possible, we use ki_cancel to
 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
 */
#define KIOCB_CANCELLED		((void *) (~0ULL))

struct aio_kiocb {
	struct kiocb		common;

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
	__u64			ki_user_data;	/* user's data for completion */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct qstr this = QSTR_INIT("[aio]", 5);
	struct file *file;
	struct path path;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(aio_mnt);

	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDWR;
	return file;
}

static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname	= simple_dname,
	};
	struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
					   AIO_RING_MAGIC);

	if (!IS_ERR(root))
		root->d_sb->s_iflags |= SB_I_NOEXEC;
	return root;
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	if (aio_ring_file) {
		truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
		i_mapping = aio_ring_file->f_mapping;
		spin_lock(&i_mapping->private_lock);
		i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&i_mapping->private_lock);

		fput(aio_ring_file);
	}
}

static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}
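
/*
 * The ring's mmap address doubles as the aio_context_t handle (see
 * aio_setup_ring() and lookup_ioctx() below), so when userspace moves the
 * mapping with mremap() we must learn the new address; this hook rewrites
 * mmap_base and user_id accordingly.
 */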
static int aio_ring_mremap(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = table->table[i];
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &aio_ring_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	rc = 0;

	/* mapping->private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = old->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
		if (ctx->ring_pages[idx] != old)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	get_page(new);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {
		put_page(new);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	BUG_ON(ctx->ring_pages[idx] != old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */
	put_page(old);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->private_lock);
	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty	= __set_page_dirty_no_writeback,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (down_write_killable(&mm->mmap_sem)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EINTR;
	}

	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused, NULL);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
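
/*
 * Worked example, assuming 4096-byte pages and 32-byte io_events:
 * AIO_EVENTS_PER_PAGE is 128, the first page holds fewer events because
 * the struct aio_ring header occupies its start, and AIO_EVENTS_OFFSET is
 * the difference.  Event i therefore lives on ring page
 * (i + AIO_EVENTS_OFFSET) / AIO_EVENTS_PER_PAGE at slot
 * (i + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE, which is how both
 * aio_complete() and aio_read_events_ring() below address the ring.
 */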

void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, common);
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
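
/*
 * Claim the cancel callback atomically: the cmpxchg() loop below ensures
 * that exactly one path can flip ki_cancel to KIOCB_CANCELLED and invoke
 * the callback, even when cancellation races with completion (see the
 * comment above KIOCB_CANCELLED).
 */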
static int kiocb_cancel(struct aio_kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */

	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(&kiocb->common);
}

static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);

	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!table->table[i]) {
					ctx->id = i;
					table->table[i] = ctx;
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changing ring_pages by ->ring_lock.
					 */
					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}

static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpus' percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != table->table[ctx->id]);
	table->table[ctx->id] = NULL;
	spin_unlock(&mm->ioctx_lock);

	/* percpu_ref_kill() will do the necessary call_rcu() */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx = table->table[i];

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}

static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}
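
/*
 * Illustrative numbers for the batching above: with ctx->req_batch == 4,
 * a completing CPU accumulates up to 8 slots in its kioctx_cpu before
 * flushing 4 at a time to the global reqs_available, and a submitting CPU
 * refills 4 at a time from it, so most submissions and completions touch
 * only the local counter rather than the shared atomic.
 */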

/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
				  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}
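
/*
 * Worked example with illustrative numbers: nr_events == 128, head == 120,
 * tail == 8 gives events_in_ring == 128 - (120 - 8) == 16; if
 * completed_events was 20, then 20 - 16 == 4 slots have provably been
 * consumed by userspace and are handed back via put_reqs_available().
 */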

/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() runs out of space
 *	in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay: we may read either the old version
		 * or the new version, and either will be valid.  The
		 * important part is that head cannot pass tail since we
		 * prevent aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do
		 * the safe/right thing.
		 */
		ring = kmap_atomic(ctx->ring_pages[0]);
		head = ring->head;
		kunmap_atomic(ring);

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))
			return NULL;
	}

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	percpu_ref_get(&ctx->reqs);

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}

static void kiocb_free(struct aio_kiocb *req)
{
	if (req->common.ki_filp)
		fput(req->common.ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	ctx = table->table[id];
	if (ctx && ctx->user_id == ctx_id) {
		percpu_ref_get(&ctx->users);
		ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct kiocb *kiocb, long res, long res2)
{
	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, common);
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	if (kiocb->ki_flags & IOCB_WRITE) {
		struct file *file = kiocb->ki_filp;

		/*
		 * Tell lockdep we inherited freeze protection from submission
		 * thread.
		 */
		if (S_ISREG(file_inode(file)->i_mode))
			__sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
		file_end_write(file);
	}

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	BUG_ON(is_sync_kiocb(kiocb));

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	percpu_ref_put(&ctx->reqs);
}

/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ? tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			      ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = KTIME_MAX;
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	if (until == 0)
		aio_read_events(ctx, min_nr, nr, event, &ret);
	else
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),
				until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
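
/*
 * Hedged userspace sketch, not part of this file: a minimal round trip
 * through the syscalls implemented here - io_setup(), io_submit(),
 * io_getevents() and io_destroy() - invoked via syscall(2) so that the
 * example depends only on <linux/aio_abi.h>.
 */
#if 0
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;		/* must be zeroed before io_setup() */
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0 || syscall(__NR_io_setup, 128, &ctx) < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (__u64)(unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);

	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
		return 1;
	if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL) != 1)
		return 1;
	printf("read %lld bytes\n", (long long)ev.res);

	syscall(__NR_io_destroy, ctx);
	close(fd);
	return 0;
}
#endif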
c00d2c7e AV |
1371 | #ifdef CONFIG_COMPAT |
1372 | COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p) | |
1373 | { | |
1374 | struct kioctx *ioctx = NULL; | |
1375 | unsigned long ctx; | |
1376 | long ret; | |
1377 | ||
1378 | ret = get_user(ctx, ctx32p); | |
1379 | if (unlikely(ret)) | |
1380 | goto out; | |
1381 | ||
1382 | ret = -EINVAL; | |
1383 | if (unlikely(ctx || nr_events == 0)) { | |
1384 | pr_debug("EINVAL: ctx %lu nr_events %u\n", | |
1385 | ctx, nr_events); | |
1386 | goto out; | |
1387 | } | |
1388 | ||
1389 | ioctx = ioctx_alloc(nr_events); | |
1390 | ret = PTR_ERR(ioctx); | |
1391 | if (!IS_ERR(ioctx)) { | |
1392 | /* truncating is ok because it's a user address */ | |
1393 | ret = put_user((u32)ioctx->user_id, ctx32p); | |
1394 | if (ret) | |
1395 | kill_ioctx(current->mm, ioctx, NULL); | |
1396 | percpu_ref_put(&ioctx->users); | |
1397 | } | |
1398 | ||
1399 | out: | |
1400 | return ret; | |
1401 | } | |
1402 | #endif | |
1403 | ||
1da177e4 LT |
1404 | /* sys_io_destroy: |
1405 | * Destroy the aio_context specified. May cancel any outstanding | |
1406 | * AIOs and block on completion. Will fail with -ENOSYS if not | |
642b5123 | 1407 | * implemented. May fail with -EINVAL if the context pointed to |
1da177e4 LT |
1408 | * is invalid. |
1409 | */ | |
002c8976 | 1410 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) |
1da177e4 LT |
1411 | { |
1412 | struct kioctx *ioctx = lookup_ioctx(ctx); | |
1413 | if (likely(NULL != ioctx)) { | |
dc48e56d | 1414 | struct ctx_rq_wait wait; |
fb2d4483 | 1415 | int ret; |
e02ba72a | 1416 | |
dc48e56d JA |
1417 | init_completion(&wait.comp); |
1418 | atomic_set(&wait.count, 1); | |
1419 | ||
e02ba72a AP |
1420 | /* Pass requests_done to kill_ioctx() where it can be set |
1421 | * in a thread-safe way. If we try to set it here then we have | |
1422 | * a race condition if two io_destroy() called simultaneously. | |
1423 | */ | |
dc48e56d | 1424 | ret = kill_ioctx(current->mm, ioctx, &wait); |
723be6e3 | 1425 | percpu_ref_put(&ioctx->users); |
e02ba72a AP |
1426 | |
1427 | /* Wait until all IO for the context are done. Otherwise kernel | |
1428 | * keep using user-space buffers even if user thinks the context | |
1429 | * is destroyed. | |
1430 | */ | |
fb2d4483 | 1431 | if (!ret) |
dc48e56d | 1432 | wait_for_completion(&wait.comp); |
e02ba72a | 1433 | |
fb2d4483 | 1434 | return ret; |
1da177e4 | 1435 | } |
acd88d4e | 1436 | pr_debug("EINVAL: invalid context id\n"); |
1da177e4 LT |
1437 | return -EINVAL; |
1438 | } | |
1439 | ||
89319d31 CH |
1440 | static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec, |
1441 | bool vectored, bool compat, struct iov_iter *iter) | |
eed4e51f | 1442 | { |
89319d31 CH |
1443 | void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; |
1444 | size_t len = iocb->aio_nbytes; | |
1445 | ||
1446 | if (!vectored) { | |
1447 | ssize_t ret = import_single_range(rw, buf, len, *iovec, iter); | |
1448 | *iovec = NULL; | |
1449 | return ret; | |
1450 | } | |
9d85cba7 JM |
1451 | #ifdef CONFIG_COMPAT |
1452 | if (compat) | |
89319d31 CH |
1453 | return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec, |
1454 | iter); | |
9d85cba7 | 1455 | #endif |
89319d31 | 1456 | return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter); |
eed4e51f BP |
1457 | } |
1458 | ||
89319d31 CH |
1459 | static inline ssize_t aio_ret(struct kiocb *req, ssize_t ret) |
1460 | { | |
1461 | switch (ret) { | |
1462 | case -EIOCBQUEUED: | |
1463 | return ret; | |
1464 | case -ERESTARTSYS: | |
1465 | case -ERESTARTNOINTR: | |
1466 | case -ERESTARTNOHAND: | |
1467 | case -ERESTART_RESTARTBLOCK: | |
1468 | /* | |
1469 | * There's no easy way to restart the syscall since other AIO's | |
1470 | * may be already running. Just fail this IO with EINTR. | |
1471 | */ | |
1472 | ret = -EINTR; | |
1473 | /*FALLTHRU*/ | |
1474 | default: | |
1475 | aio_complete(req, ret, 0); | |
1476 | return 0; | |
1477 | } | |
1478 | } | |
1479 | ||
1480 | static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, | |
1481 | bool compat) | |
1da177e4 | 1482 | { |
41ef4eb8 | 1483 | struct file *file = req->ki_filp; |
00fefb9c | 1484 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
293bc982 | 1485 | struct iov_iter iter; |
89319d31 | 1486 | ssize_t ret; |
1da177e4 | 1487 | |
89319d31 CH |
1488 | if (unlikely(!(file->f_mode & FMODE_READ))) |
1489 | return -EBADF; | |
1490 | if (unlikely(!file->f_op->read_iter)) | |
1491 | return -EINVAL; | |
73a7075e | 1492 | |
89319d31 CH |
1493 | ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); |
1494 | if (ret) | |
1495 | return ret; | |
1496 | ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); | |
1497 | if (!ret) | |
bb7462b6 | 1498 | ret = aio_ret(req, call_read_iter(file, req, &iter)); |
89319d31 CH |
1499 | kfree(iovec); |
1500 | return ret; | |
1501 | } | |
73a7075e | 1502 | |
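/*
 * Context sketch (assumption): a file backs these paths simply by
 * providing ->read_iter/->write_iter in its file_operations; there is no
 * aio-specific method left to implement:
 *
 *	static const struct file_operations my_fops = {
 *		.read_iter  = my_read_iter,
 *		.write_iter = my_write_iter,
 *	};
 */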
89319d31 CH |
1503 | static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, |
1504 | bool compat) | |
1505 | { | |
1506 | struct file *file = req->ki_filp; | |
1507 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | |
1508 | struct iov_iter iter; | |
1509 | ssize_t ret; | |
41ef4eb8 | 1510 | |
89319d31 CH |
1511 | if (unlikely(!(file->f_mode & FMODE_WRITE))) |
1512 | return -EBADF; | |
1513 | if (unlikely(!file->f_op->write_iter)) | |
41ef4eb8 | 1514 | return -EINVAL; |
1da177e4 | 1515 | |
89319d31 CH |
1516 | ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); |
1517 | if (ret) | |
1518 | return ret; | |
1519 | ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); | |
1520 | if (!ret) { | |
70fe2f48 | 1521 | req->ki_flags |= IOCB_WRITE; |
89319d31 | 1522 | file_start_write(file); |
bb7462b6 | 1523 | ret = aio_ret(req, call_write_iter(file, req, &iter)); |
70fe2f48 JK |
1524 | /* |
1525 | * We release freeze protection in aio_complete(). Fool lockdep | |
1526 | * by telling it the lock got released so that it doesn't | |
1527 | * complain about held lock when we return to userspace. | |
1528 | */ | |
a12f1ae6 SL |
1529 | if (S_ISREG(file_inode(file)->i_mode)) |
1530 | __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); | |
41ef4eb8 | 1531 | } |
89319d31 CH |
1532 | kfree(iovec); |
1533 | return ret; | |
1da177e4 LT |
1534 | } |
1535 | ||
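/*
 * Counterpart sketch (paraphrased from aio_complete() earlier in this
 * file, not verbatim): the IOCB_WRITE flag set above tells the completion
 * side to drop the freeze protection taken by file_start_write():
 *
 *	if (kiocb->ki_flags & IOCB_WRITE) {
 *		if (S_ISREG(file_inode(file)->i_mode))
 *			__sb_writers_acquired(file_inode(file)->i_sb,
 *					      SB_FREEZE_WRITE);
 *		file_end_write(file);
 *	}
 */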
d5470b59 | 1536 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
a1c8eae7 | 1537 | struct iocb *iocb, bool compat) |
1da177e4 | 1538 | { |
04b2fa9f | 1539 | struct aio_kiocb *req; |
89319d31 | 1540 | struct file *file; |
1da177e4 LT |
1541 | ssize_t ret; |
1542 | ||
1543 | /* enforce forwards compatibility on users */ | |
9830f4be | 1544 | if (unlikely(iocb->aio_reserved2)) { |
caf4167a | 1545 | pr_debug("EINVAL: reserve field set\n"); |
1da177e4 LT |
1546 | return -EINVAL; |
1547 | } | |
1548 | ||
1549 | /* prevent overflows */ | |
1550 | if (unlikely( | |
1551 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | |
1552 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | |
1553 | ((ssize_t)iocb->aio_nbytes < 0) | |
1554 | )) { | |
acd88d4e | 1555 | pr_debug("EINVAL: overflow check\n"); |
1da177e4 LT |
1556 | return -EINVAL; |
1557 | } | |
1558 | ||
41ef4eb8 | 1559 | req = aio_get_req(ctx); |
1d98ebfc | 1560 | if (unlikely(!req)) |
1da177e4 | 1561 | return -EAGAIN; |
1d98ebfc | 1562 | |
89319d31 | 1563 | req->common.ki_filp = file = fget(iocb->aio_fildes); |
04b2fa9f | 1564 | if (unlikely(!req->common.ki_filp)) { |
1d98ebfc KO |
1565 | ret = -EBADF; |
1566 | goto out_put_req; | |
1da177e4 | 1567 | } |
04b2fa9f CH |
1568 | req->common.ki_pos = iocb->aio_offset; |
1569 | req->common.ki_complete = aio_complete; | |
2ba48ce5 | 1570 | req->common.ki_flags = iocb_flags(req->common.ki_filp); |
1d98ebfc | 1571 | |
9c3060be DL |
1572 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
1573 | /* | |
1574 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | |
1575 | * instance of the file* now. The file descriptor must be | |
1576 | * an eventfd() fd, and will be signaled for each completed | |
1577 | * event using the eventfd_signal() function. | |
1578 | */ | |
13389010 | 1579 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
801678c5 | 1580 | if (IS_ERR(req->ki_eventfd)) { |
9c3060be | 1581 | ret = PTR_ERR(req->ki_eventfd); |
87c3a86e | 1582 | req->ki_eventfd = NULL; |
9c3060be DL |
1583 | goto out_put_req; |
1584 | } | |
04b2fa9f CH |
1585 | |
1586 | req->common.ki_flags |= IOCB_EVENTFD; | |
9c3060be | 1587 | } |
1da177e4 | 1588 | |
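	/*
	 * Userspace-side sketch of IOCB_FLAG_RESFD (assumed example):
	 * pairing an iocb with an eventfd lets completions be waited on
	 * with poll/epoll rather than blocking in io_getevents():
	 *
	 *	int efd = eventfd(0, EFD_CLOEXEC);
	 *	cb.aio_flags |= IOCB_FLAG_RESFD;
	 *	cb.aio_resfd  = efd;	(signaled once per completion)
	 */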
9830f4be GR |
1589 | ret = kiocb_set_rw_flags(&req->common, iocb->aio_rw_flags); |
1590 | if (unlikely(ret)) { | |
1591 | pr_debug("EINVAL: aio_rw_flags\n"); | |
1592 | goto out_put_req; | |
1593 | } | |
1594 | ||
8a660890 | 1595 | ret = put_user(KIOCB_KEY, &user_iocb->aio_key); |
1da177e4 | 1596 | if (unlikely(ret)) { |
caf4167a | 1597 | pr_debug("EFAULT: aio_key\n"); |
1da177e4 LT |
1598 | goto out_put_req; |
1599 | } | |
1600 | ||
04b2fa9f | 1601 | req->ki_user_iocb = user_iocb; |
1da177e4 | 1602 | req->ki_user_data = iocb->aio_data; |
1da177e4 | 1603 | |
89319d31 CH |
1604 | get_file(file); |
1605 | switch (iocb->aio_lio_opcode) { | |
1606 | case IOCB_CMD_PREAD: | |
1607 | ret = aio_read(&req->common, iocb, false, compat); | |
1608 | break; | |
1609 | case IOCB_CMD_PWRITE: | |
1610 | ret = aio_write(&req->common, iocb, false, compat); | |
1611 | break; | |
1612 | case IOCB_CMD_PREADV: | |
1613 | ret = aio_read(&req->common, iocb, true, compat); | |
1614 | break; | |
1615 | case IOCB_CMD_PWRITEV: | |
1616 | ret = aio_write(&req->common, iocb, true, compat); | |
1617 | break; | |
1618 | default: | |
1619 | pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); | |
1620 | ret = -EINVAL; | |
1621 | break; | |
1622 | } | |
1623 | fput(file); | |
41003a7b | 1624 | |
89319d31 CH |
1625 | if (ret && ret != -EIOCBQUEUED) |
1626 | goto out_put_req; | |
1da177e4 | 1627 | return 0; |
1da177e4 | 1628 | out_put_req: |
e1bdd5f2 | 1629 | put_reqs_available(ctx, 1); |
e34ecee2 | 1630 | percpu_ref_put(&ctx->reqs); |
57282d8f | 1631 | kiocb_free(req); |
1da177e4 LT |
1632 | return ret; |
1633 | } | |
1634 | ||
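/*
 * Userspace-side sketch of what io_submit_one() consumes (assumed
 * example; fd, buf, len, off and tag are placeholders):
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	cb.aio_fildes     = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PWRITE;
 *	cb.aio_buf        = (__u64)(uintptr_t)buf;
 *	cb.aio_nbytes     = len;
 *	cb.aio_offset     = off;
 *	cb.aio_data       = tag;	(echoed back in io_event.data)
 *	syscall(SYS_io_submit, ctx, 1, cbs);
 */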
c00d2c7e AV |
1635 | static long do_io_submit(aio_context_t ctx_id, long nr, |
1636 | struct iocb __user *__user *iocbpp, bool compat) | |
1da177e4 LT |
1637 | { |
1638 | struct kioctx *ctx; | |
1639 | long ret = 0; | |
080d676d | 1640 | int i = 0; |
9f5b9425 | 1641 | struct blk_plug plug; |
1da177e4 LT |
1642 | |
1643 | if (unlikely(nr < 0)) | |
1644 | return -EINVAL; | |
1645 | ||
75e1c70f JM |
1646 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) |
1647 | nr = LONG_MAX/sizeof(*iocbpp); | |
1648 | ||
1da177e4 LT |
1649 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) |
1650 | return -EFAULT; | |
1651 | ||
1652 | ctx = lookup_ioctx(ctx_id); | |
1653 | if (unlikely(!ctx)) { | |
caf4167a | 1654 | pr_debug("EINVAL: invalid context id\n"); |
1da177e4 LT |
1655 | return -EINVAL; |
1656 | } | |
1657 | ||
9f5b9425 SL |
1658 | blk_start_plug(&plug); |
1659 | ||
1da177e4 LT |
1660 | /* |
1661 | * AKPM: should this return a partial result if some of the IOs were | |
1662 | * successfully submitted? | |
1663 | */ | |
1664 | for (i = 0; i < nr; i++) {
1665 | struct iocb __user *user_iocb; | |
1666 | struct iocb tmp; | |
1667 | ||
1668 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { | |
1669 | ret = -EFAULT; | |
1670 | break; | |
1671 | } | |
1672 | ||
1673 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { | |
1674 | ret = -EFAULT; | |
1675 | break; | |
1676 | } | |
1677 | ||
a1c8eae7 | 1678 | ret = io_submit_one(ctx, user_iocb, &tmp, compat); |
1da177e4 LT |
1679 | if (ret) |
1680 | break; | |
1681 | } | |
9f5b9425 | 1682 | blk_finish_plug(&plug); |
1da177e4 | 1683 | |
723be6e3 | 1684 | percpu_ref_put(&ctx->users); |
1da177e4 LT |
1685 | return i ? i : ret; |
1686 | } | |
1687 | ||
9d85cba7 JM |
1688 | /* sys_io_submit: |
1689 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns | |
1690 | * the number of iocbs queued. May return -EINVAL if the aio_context | |
1691 | * specified by ctx_id is invalid, if nr is < 0, if the iocb at | |
1692 | * *iocbpp[0] is not properly initialized, or if the operation
1693 | * specified is invalid for the file descriptor in the iocb. May fail with
1694 | * -EFAULT if any of the data structures point to invalid data. May | |
1695 | * fail with -EBADF if the file descriptor specified in the first | |
1696 | * iocb is invalid. May fail with -EAGAIN if insufficient resources | |
1697 | * are available to queue any iocbs. Will return 0 if nr is 0. Will | |
1698 | * fail with -ENOSYS if not implemented. | |
1699 | */ | |
1700 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, | |
1701 | struct iocb __user * __user *, iocbpp) | |
1702 | { | |
1703 | return do_io_submit(ctx_id, nr, iocbpp, false);
1704 | } | |
1705 | ||
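/*
 * Usage note (assumed example): a short count here means only the first
 * n iocbs were queued and the rest were not, so callers typically retry
 * the tail:
 *
 *	long done = 0;
 *	while (done < nr) {
 *		long n = syscall(SYS_io_submit, ctx, nr - done, cbs + done);
 *		if (n < 0)
 *			break;		(first remaining iocb failed; see errno)
 *		done += n;
 *	}
 */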
c00d2c7e AV |
1706 | #ifdef CONFIG_COMPAT |
1707 | static inline long | |
1708 | copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64) | |
1709 | { | |
1710 | compat_uptr_t uptr; | |
1711 | int i; | |
1712 | ||
1713 | for (i = 0; i < nr; ++i) { | |
1714 | if (get_user(uptr, ptr32 + i)) | |
1715 | return -EFAULT; | |
1716 | if (put_user(compat_ptr(uptr), ptr64 + i)) | |
1717 | return -EFAULT; | |
1718 | } | |
1719 | return 0; | |
1720 | } | |
1721 | ||
1722 | #define MAX_AIO_SUBMITS (PAGE_SIZE/sizeof(struct iocb *)) | |
1723 | ||
1724 | COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id, | |
1725 | int, nr, u32 __user *, iocb) | |
1726 | { | |
1727 | struct iocb __user * __user *iocb64; | |
1728 | long ret; | |
1729 | ||
1730 | if (unlikely(nr < 0)) | |
1731 | return -EINVAL; | |
1732 | ||
1733 | if (nr > MAX_AIO_SUBMITS) | |
1734 | nr = MAX_AIO_SUBMITS; | |
1735 | ||
1736 | iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64)); | |
1737 | ret = copy_iocb(nr, iocb, iocb64); | |
1738 | if (!ret) | |
1739 | ret = do_io_submit(ctx_id, nr, iocb64, true); | |
1740 | return ret; | |
1741 | } | |
1742 | #endif | |
1743 | ||
1da177e4 LT |
1744 | /* lookup_kiocb |
1745 | * Finds a given iocb for cancellation. | |
1da177e4 | 1746 | */ |
04b2fa9f CH |
1747 | static struct aio_kiocb * |
1748 | lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key) | |
1da177e4 | 1749 | { |
04b2fa9f | 1750 | struct aio_kiocb *kiocb; |
d00689af ZB |
1751 | |
1752 | assert_spin_locked(&ctx->ctx_lock); | |
1753 | ||
8a660890 KO |
1754 | if (key != KIOCB_KEY) |
1755 | return NULL; | |
1756 | ||
1da177e4 | 1757 | /* TODO: use a hash or array, this sucks. */ |
04b2fa9f CH |
1758 | list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { |
1759 | if (kiocb->ki_user_iocb == iocb) | |
1da177e4 LT |
1760 | return kiocb; |
1761 | } | |
1762 | return NULL; | |
1763 | } | |
1764 | ||
1765 | /* sys_io_cancel: | |
1766 | * Attempts to cancel an iocb previously passed to io_submit. If | |
1767 | * the operation is successfully cancelled, the resulting event is | |
1768 | * copied into the memory pointed to by result without being placed | |
1769 | * into the completion queue and 0 is returned. May fail with | |
1770 | * -EFAULT if any of the data structures pointed to are invalid. | |
1771 | * May fail with -EINVAL if aio_context specified by ctx_id is | |
1772 | * invalid. May fail with -EAGAIN if the iocb specified was not | |
1773 | * cancelled. Will fail with -ENOSYS if not implemented. | |
1774 | */ | |
002c8976 HC |
1775 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
1776 | struct io_event __user *, result) | |
1da177e4 | 1777 | { |
1da177e4 | 1778 | struct kioctx *ctx; |
04b2fa9f | 1779 | struct aio_kiocb *kiocb; |
1da177e4 LT |
1780 | u32 key; |
1781 | int ret; | |
1782 | ||
1783 | ret = get_user(key, &iocb->aio_key); | |
1784 | if (unlikely(ret)) | |
1785 | return -EFAULT; | |
1786 | ||
1787 | ctx = lookup_ioctx(ctx_id); | |
1788 | if (unlikely(!ctx)) | |
1789 | return -EINVAL; | |
1790 | ||
1791 | spin_lock_irq(&ctx->ctx_lock); | |
906b973c | 1792 | |
1da177e4 | 1793 | kiocb = lookup_kiocb(ctx, iocb, key); |
906b973c | 1794 | if (kiocb) |
d52a8f9e | 1795 | ret = kiocb_cancel(kiocb); |
906b973c KO |
1796 | else |
1797 | ret = -EINVAL; | |
1798 | ||
1da177e4 LT |
1799 | spin_unlock_irq(&ctx->ctx_lock); |
1800 | ||
906b973c | 1801 | if (!ret) { |
bec68faa KO |
1802 | /* |
1803 | * The result argument is no longer used - the io_event is | |
1804 | * always delivered via the ring buffer. -EINPROGRESS indicates | |
1805 | * that cancellation is in progress:
906b973c | 1806 | */ |
bec68faa | 1807 | ret = -EINPROGRESS; |
906b973c | 1808 | } |
1da177e4 | 1809 | |
723be6e3 | 1810 | percpu_ref_put(&ctx->users); |
1da177e4 LT |
1811 | |
1812 | return ret; | |
1813 | } | |
1814 | ||
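/*
 * Userspace-side sketch (assumed example): plain file reads and writes
 * register no cancel method, so in practice this tends to fail with
 * -EINVAL or, per the comment above, report -EINPROGRESS via errno:
 *
 *	struct io_event ev;	(no longer filled in by the kernel)
 *	if (syscall(SYS_io_cancel, ctx, &cb, &ev) < 0)
 *		perror("io_cancel");
 */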
1815 | /* io_getevents: | |
1816 | * Attempts to read at least min_nr events and up to nr events from | |
642b5123 ST |
1817 | * the completion queue for the aio_context specified by ctx_id. If |
1818 | * it succeeds, the number of read events is returned. May fail with | |
1819 | * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is | |
1820 | * out of range, if timeout is out of range. May fail with -EFAULT | |
1821 | * if any of the memory specified is invalid. May return 0 or | |
1822 | * < min_nr if the timeout specified by timeout has elapsed | |
1823 | * before sufficient events are available, where timeout == NULL | |
1824 | * specifies an infinite timeout. Note that the timeout pointed to by | |
6900807c | 1825 | * timeout is relative. Will fail with -ENOSYS if not implemented. |
1da177e4 | 1826 | */ |
002c8976 HC |
1827 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
1828 | long, min_nr, | |
1829 | long, nr, | |
1830 | struct io_event __user *, events, | |
1831 | struct timespec __user *, timeout) | |
1da177e4 LT |
1832 | { |
1833 | struct kioctx *ioctx = lookup_ioctx(ctx_id); | |
1834 | long ret = -EINVAL; | |
1835 | ||
1836 | if (likely(ioctx)) { | |
2e410255 | 1837 | if (likely(min_nr <= nr && min_nr >= 0)) |
1da177e4 | 1838 | ret = read_events(ioctx, min_nr, nr, events, timeout); |
723be6e3 | 1839 | percpu_ref_put(&ioctx->users); |
1da177e4 | 1840 | } |
1da177e4 LT |
1841 | return ret; |
1842 | } | |
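/*
 * Userspace-side sketch of the reap path (assumed example):
 *
 *	struct io_event evs[32];
 *	struct timespec ts = { .tv_sec = 1 };	(relative timeout)
 *	int n = syscall(SYS_io_getevents, ctx, 1, 32, evs, &ts);
 *	for (int i = 0; i < n; i++)
 *		handle(evs[i].data, evs[i].res);	(data == aio_data)
 */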
c00d2c7e AV |
1843 | |
1844 | #ifdef CONFIG_COMPAT | |
1845 | COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id, | |
1846 | compat_long_t, min_nr, | |
1847 | compat_long_t, nr, | |
1848 | struct io_event __user *, events, | |
1849 | struct compat_timespec __user *, timeout) | |
1850 | { | |
1851 | struct timespec t; | |
1852 | struct timespec __user *ut = NULL; | |
1853 | ||
1854 | if (timeout) { | |
1855 | if (compat_get_timespec(&t, timeout)) | |
1856 | return -EFAULT; | |
1857 | ||
1858 | ut = compat_alloc_user_space(sizeof(*ut)); | |
1859 | if (copy_to_user(ut, &t, sizeof(t))) | |
1860 | return -EFAULT; | |
1861 | } | |
1862 | return sys_io_getevents(ctx_id, min_nr, nr, events, ut); | |
1863 | } | |
1864 | #endif |