// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "openclose.h"
#include "rsrc.h"
struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static const struct io_mapped_ubuf dummy_ubuf = {
	/* set invalid range, so io_import_fixed() fails meeting it */
	.ubuf = -1UL,
	.ubuf_end = 0,
};
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}
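/*
 * A worked example of the accounting above, assuming 4 KiB pages and an
 * RLIMIT_MEMLOCK of 64 MiB: page_limit is (64 << 20) >> PAGE_SHIFT = 16384
 * pages. A registration that would push user->locked_vm past that count
 * fails with -ENOMEM before anything is charged; otherwise the cmpxchg loop
 * adds nr_pages atomically, so concurrent registrations cannot overshoot
 * the limit between the check and the update.
 */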
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}
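/*
 * For example (hypothetical values), iov_base = (void *)0xfffffffffffff000
 * with iov_len = 8192 passes the NULL and size checks but overflows the
 * unsigned long addition of base + acct_len, so it is rejected here with
 * -EOVERFLOW instead of failing later during pinning.
 */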
static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != &dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}
static void io_rsrc_put_work(struct io_rsrc_node *node)
{
	struct io_rsrc_put *prsrc = &node->item;

	if (prsrc->tag)
		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		fput(prsrc->file);
		break;
	case IORING_RSRC_BUFFER:
		io_rsrc_buf_put(node->ctx, prsrc);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node))
		kfree(node);
}
void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);

		if (likely(!node->empty))
			io_rsrc_put_work(node);
		io_rsrc_node_destroy(ctx, node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;

	ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (!ref_node) {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->ctx = ctx;
	ref_node->empty = 0;
	ref_node->refs = 1;
	return ref_node;
}
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *backup;
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, other task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;

	backup = io_rsrc_node_alloc(ctx);
	if (!backup)
		return -ENOMEM;
	ctx->rsrc_node->empty = true;
	ctx->rsrc_node->type = -1;
	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, ctx->rsrc_node);
	ctx->rsrc_node = backup;

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			finish_wait(&ctx->rsrc_quiesce_wq, &we);
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		__set_current_state(TASK_RUNNING);
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}
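/*
 * In short: quiesce parks the current rsrc_node on rsrc_ref_list as an
 * empty placeholder, installs a fresh node, and then sleeps on
 * rsrc_quiesce_wq until every node queued before it has dropped its last
 * reference. Only then is it safe to free the underlying table.
 */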
static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}
static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}
__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
				     u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->rsrc_type = type;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			err = io_queue_rsrc_removal(data, i,
						    io_slot_file(file_slot));
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
		}
		if (fd != -1) {
			struct file *file = fget(fd);

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	struct iovec __user *uvec = u64_to_user_ptr(up->data);
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec fast_iov, *iov;
	struct page *last_hpage = NULL;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		u64 tag = 0;

		iov = iovec_from_user(&uvec[done], 1, 1, &fast_iov, ctx->compat);
		if (IS_ERR(iov)) {
			err = PTR_ERR(iov);
			break;
		}
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(iov);
		if (err)
			break;
		if (!iov->iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != &dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}
	return done ? done : err;
}
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}
int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}
__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}
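/*
 * Example of a sparse registration through this path (illustrative values):
 * userspace calls io_uring_register() with IORING_REGISTER_FILES2 and
 *
 *	rr.nr    = 64;
 *	rr.flags = IORING_RSRC_REGISTER_SPARSE;
 *	rr.data  = 0;	(must be 0 when sparse)
 *	rr.tags  = 0;
 *
 * which creates 64 empty file slots that can be filled later via
 * IORING_REGISTER_FILES_UPDATE2 or IORING_OP_FILES_UPDATE.
 */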
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}
static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}
int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
	struct io_ring_ctx *ctx = data->ctx;
	struct io_rsrc_node *node = ctx->rsrc_node;
	u64 *tag_slot = io_get_tag_slot(data, idx);

	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	if (unlikely(!ctx->rsrc_node)) {
		ctx->rsrc_node = node;
		return -ENOMEM;
	}

	node->item.rsrc = rsrc;
	node->type = data->rsrc_type;
	node->item.tag = *tag_slot;
	*tag_slot = 0;

	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, node);
	return 0;
}
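/*
 * Note the handover above: the resource being dropped rides on the ring's
 * current rsrc_node, that node is queued on rsrc_ref_list, and a freshly
 * allocated node becomes the active one. The actual fput()/unmap happens in
 * io_rsrc_put_work() only once every request that grabbed a reference to
 * the old node has released it.
 */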
void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		if (!file)
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}
int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}
static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}
/*
 * Not super efficient, but this is just a registration-time cost. And we do
 * cache the last compound head, so generally we'll only do a full search if
 * we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}
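/*
 * For example, with 4 KiB base pages a buffer pinned from a 2 MiB huge page
 * accounts page_size(hpage) >> PAGE_SHIFT = 512 pages exactly once, even if
 * the iovec covers only part of the huge page. Later buffers landing in the
 * same compound page are caught either by *last_hpage or by
 * headpage_already_acct() and are not charged again.
 */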
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			/*
			 * Pages must be consecutive and on the same folio for
			 * this to work
			 */
			if (page_folio(pages[i]) != folio ||
			    pages[i] != pages[i - 1] + 1) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, it doesn't
			 * actually unpin them but drops all but one reference,
			 * which is usually put down by io_buffer_unmap().
			 * Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}
static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	struct iovec fast_iov, *iov = &fast_iov;
	const struct iovec __user *uvec = (struct iovec * __user) arg;
	int i, ret;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	if (!arg)
		memset(iov, 0, sizeof(*iov));

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			iov = iovec_from_user(&uvec[i], 1, 1, &fast_iov, ctx->compat);
			if (IS_ERR(iov)) {
				ret = PTR_ERR(iov);
				break;
			}
			ret = io_buffer_validate(iov);
			if (ret)
				break;
		}

		if (!iov->iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}
int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
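		/*
		 * Worked example, assuming the first bvec is a full 4 KiB
		 * page: offset = 9 KiB takes the else-branch below, leaving
		 * offset = 5 KiB after skipping the first segment, so
		 * seg_skip = 1 + (5120 >> PAGE_SHIFT) = 2, iov_offset becomes
		 * 5120 & ~PAGE_MASK = 1 KiB, and count drops back to len.
		 */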
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			/*
			 * Note, huge page buffers consist of one large
			 * bvec entry and should always go this way. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}