[linux.git] / io_uring / rsrc.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "openclose.h"
#include "rsrc.h"
#include "memmap.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static const struct io_mapped_ubuf dummy_ubuf = {
	/* set invalid range, so io_import_fixed() fails meeting it */
	.ubuf = -1UL,
	.ubuf_end = 0,
};

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}
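
/*
 * Illustrative userspace sketch (not part of this file): when registration
 * is charged against RLIMIT_MEMLOCK by __io_account_mem() above, a process
 * can check the limit it will be accounted against before registering:
 *
 *	struct rlimit rl;
 *
 *	getrlimit(RLIMIT_MEMLOCK, &rl);
 *	if (rl.rlim_cur < buf_len)
 *		// raise the limit or register a smaller buffer
 */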

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}
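
/*
 * Worked example for the check above: on a 4K-page system, iov_base =
 * 0xfffffffffffff000 and iov_len = 0x1000 give acct_len = 0x1fff, so
 * base + acct_len wraps past ULONG_MAX and check_add_overflow() reports
 * -EOVERFLOW. acct_len rounds the length up by (PAGE_SIZE - 1) so that
 * the overflow test also covers the partial trailing page that pinning
 * and accounting will touch.
 */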

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != &dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

static void io_rsrc_put_work(struct io_rsrc_node *node)
{
	struct io_rsrc_put *prsrc = &node->item;

	if (prsrc->tag)
		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		fput(prsrc->file);
		break;
	case IORING_RSRC_BUFFER:
		io_rsrc_buf_put(node->ctx, prsrc);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node))
		kfree(node);
}

void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);

		if (likely(!node->empty))
			io_rsrc_put_work(node);
		io_rsrc_node_destroy(ctx, node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;

	ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (!ref_node) {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->ctx = ctx;
	ref_node->empty = 0;
	ref_node->refs = 1;
	return ref_node;
}

__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *backup;
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, other task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;

	backup = io_rsrc_node_alloc(ctx);
	if (!backup)
		return -ENOMEM;
	ctx->rsrc_node->empty = true;
	ctx->rsrc_node->type = -1;
	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, ctx->rsrc_node);
	ctx->rsrc_node = backup;

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			__set_current_state(TASK_RUNNING);
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		__set_current_state(TASK_RUNNING);
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}
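
/*
 * A sketch of the quiesce protocol implemented above: the current rsrc
 * node is retired onto rsrc_ref_list and a fresh node is installed, so no
 * new requests can take references to the old one. The caller then sleeps
 * on rsrc_quiesce_wq, running task work as needed, until every queued node
 * has dropped its last reference and io_rsrc_node_ref_zero() empties the
 * list. Only then is it safe to tear down the file or buffer table.
 */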

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}
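
/*
 * Worked example: with 4K pages, io_alloc_page_table(3 * PAGE_SIZE + 8)
 * computes nr_tables = 4 and allocates three full PAGE_SIZE chunks plus
 * one 8-byte tail chunk, i.e. a two-level table indexed roughly as
 * table[idx / entries_per_page][idx % entries_per_page], which (modulo
 * the shift/mask constants in rsrc.h) is how io_get_tag_slot() locates a
 * tag slot.
 */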

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
				     u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->rsrc_type = type;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			err = io_queue_rsrc_removal(data, i,
						    io_slot_file(file_slot));
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
		}
		if (fd != -1) {
			struct file *file = fget(fd);

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != &dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}
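
/*
 * Illustrative userspace call (raw syscall, not part of this file):
 * replacing fixed-file slots 3 and 4 through the older, tag-less
 * interface handled above:
 *
 *	int fds[2] = { new_fd_a, new_fd_b };
 *	struct io_uring_rsrc_update up = {
 *		.offset	= 3,
 *		.data	= (unsigned long)fds,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE, &up, 2);
 */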

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}
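
/*
 * Illustrative userspace call (raw syscall, not part of this file):
 * creating a sparse table of 16 empty fixed-file slots via the extended
 * interface parsed above; slots are populated later with updates:
 *
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= 16,
 *		.flags	= IORING_RSRC_REGISTER_SPARSE,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
 *		&rr, sizeof(rr));
 */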

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
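
/*
 * Illustrative SQE setup (not part of this file) for the request handled
 * above: IORING_OP_FILES_UPDATE reads the fd array from sqe->addr, the
 * count from sqe->len and the starting slot from sqe->off, so with
 * liburing-style helpers:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_files_update(sqe, fds, nr_fds, offset);
 *
 * Passing IORING_FILE_INDEX_ALLOC as the offset lets the kernel pick free
 * slots and write the chosen indices back over the fd array.
 */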

int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
	struct io_ring_ctx *ctx = data->ctx;
	struct io_rsrc_node *node = ctx->rsrc_node;
	u64 *tag_slot = io_get_tag_slot(data, idx);

	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	if (unlikely(!ctx->rsrc_node)) {
		ctx->rsrc_node = node;
		return -ENOMEM;
	}

	node->item.rsrc = rsrc;
	node->type = data->rsrc_type;
	node->item.tag = *tag_slot;
	*tag_slot = 0;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, node);
	return 0;
}
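
/*
 * Note on the swap above: the resource being removed is parked on the
 * ring's *current* rsrc node, which is then retired onto rsrc_ref_list,
 * and a freshly allocated node takes its place. In-flight requests still
 * hold references to the retired node; once the last one drops,
 * io_rsrc_node_ref_zero() runs io_rsrc_put_work() to post the tag CQE and
 * release the file or buffer.
 */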

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		if (!file)
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this only happens at registration time. And we
 * do cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}
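
/*
 * Worked example: a 64K buffer backed by one 2M huge page pins 16
 * PAGE_SIZE subpages, but io_buffer_account_pin() charges the full huge
 * page (512 pages with 4K PAGE_SIZE) the first time that compound head is
 * seen, and headpage_already_acct() prevents a second buffer on the same
 * huge page from being charged again.
 */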

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			/*
			 * Pages must be consecutive and on the same folio for
			 * this to work
			 */
			if (page_folio(pages[i]) != folio ||
			    pages[i] != pages[i - 1] + 1) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, it doesn't
			 * actually unpin them but drops all but one reference,
			 * which is usually put down by io_buffer_unmap().
			 * Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}
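
	/*
	 * Example of the coalescing above: a 2M buffer allocated with
	 * mmap(MAP_HUGETLB) pins 512 pages that all sit consecutively in
	 * one folio, so they collapse into a single 2M bvec entry instead
	 * of 512 PAGE_SIZE entries.
	 */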

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}
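
/*
 * Illustrative userspace registration (liburing, not part of this file)
 * reaching the function above via IORING_REGISTER_BUFFERS:
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len  = buf_len,
 *	};
 *
 *	io_uring_register_buffers(&ring, &iov, 1);
 *
 * Requests then reference the buffer by index with
 * IORING_OP_READ_FIXED / IORING_OP_WRITE_FIXED.
 */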

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * May not be the start of the buffer; set the size appropriately
	 * and advance to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

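	/*
	 * Worked example for the fast-forward below, assuming 4K pages and
	 * a page-aligned buffer: importing at buf_addr = ubuf + 0x2300
	 * gives offset = 0x2300. The first bvec (0x1000 bytes) is skipped,
	 * leaving 0x1300, so seg_skip = 1 + (0x1300 >> 12) = 2; iteration
	 * resumes at bvec[2] with iov_offset = 0x300.
	 */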
	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or is the whole
		 * first bvec), just use iov_iter_advance(). This makes it
		 * easier since we can just skip the first segment, which may
		 * not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			/*
			 * Note, huge page buffers consist of one large
			 * bvec entry and should always go this way. The
			 * other branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
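			/*
			 * Historical quirk: the line above stores a byte
			 * length in a segment-count field. It appears benign
			 * because iter->count still bounds iteration, and
			 * later kernels rework this assignment.
			 */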
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}