#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {	\
		__v.iov_base = __p->iov_base + skip;	\
		__v.iov_len -= left;	\
		skip += __v.iov_len;	\
	while (unlikely(!left && n)) {	\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))	\
		__v.iov_base = __p->iov_base;	\
		__v.iov_len -= left;	\
#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {	\
		__v.iov_base = __p->iov_base + skip;	\
		skip += __v.iov_len;	\
	while (unlikely(n)) {	\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))	\
		__v.iov_base = __p->iov_base;	\
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;	\
	__start.bi_size = n;	\
	__start.bi_bvec_done = skip;	\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
#define iterate_all_kinds(i, n, v, I, B, K) {	\
	if (likely(n)) {	\
		size_t skip = i->iov_offset;	\
		if (unlikely(i->type & ITER_BVEC)) {	\
			struct bio_vec v;	\
			struct bvec_iter __bi;	\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;	\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else {	\
			const struct iovec *iov;	\
			struct iovec v;	\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}	\
	}	\
}
#define iterate_and_advance(i, n, v, I, B, K) {	\
	if (unlikely(i->count < n))	\
		n = i->count;	\
	if (i->count) {	\
		size_t skip = i->iov_offset;	\
		if (unlikely(i->type & ITER_BVEC)) {	\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;	\
			struct bvec_iter __bi;	\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;	\
			skip = __bi.bi_bvec_done;	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;	\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {	\
				kvec++;	\
				skip = 0;	\
			}	\
			i->nr_segs -= kvec - i->kvec;	\
			i->kvec = kvec;	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;	\
		} else {	\
			const struct iovec *iov;	\
			struct iovec v;	\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {	\
				iov++;	\
				skip = 0;	\
			}	\
			i->nr_segs -= iov - i->iov;	\
			i->iov = iov;	\
		}	\
		i->count -= n;	\
		i->iov_offset = skip;	\
	}	\
}
static int copyout(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
static int copyin(void *to, const void __user *from, size_t n)
{
	if (access_ok(VERIFY_READ, from, n)) {
		kasan_check_write(to, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);
	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;
		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		while (unlikely(!left && bytes)) {
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
		offset = from - kaddr;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	/* Too bad - revert to non-atomic kmap */
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	while (unlikely(!left && bytes)) {
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
	if (skip == iov->iov_len) {
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov_offset = skip;
	return wanted - bytes;
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);
	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		while (unlikely(!left && bytes)) {
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	/* Too bad - revert to non-atomic kmap */
	left = copyin(to, buf, copy);
	while (unlikely(!left && bytes)) {
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
	if (skip == iov->iov_len) {
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov_offset = skip;
	return wanted - bytes;
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif /* PIPE_PARANOIA */
static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	buf = &pipe->bufs[idx];
	if (offset == off && buf->page == page) {
		/* merge with the last one */
		i->iov_offset += bytes;
	idx = next_idx(idx, pipe);
	buf = &pipe->bufs[idx];
	if (idx == pipe->curbuf && pipe->nrbufs)
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	i->iov_offset = offset + bytes;
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
EXPORT_SYMBOL(iov_iter_fault_in_readable);
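/*
 * Usage sketch (illustrative only, not part of this file): a buffered write
 * path typically faults the source pages in *before* taking any page locks,
 * so that the later atomic copy is unlikely to fault.  The shape below
 * follows generic_perform_write(); the surrounding names belong to that
 * caller, not to anything defined here:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */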
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better. Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}
static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}
static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
	size_t off = i->iov_offset;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(size > i->count))
	data_start(i, &idx, &off);
		left -= PAGE_SIZE - off;
			pipe->bufs[idx].len += size;
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
	struct pipe_inode_info *pipe = i->pipe;
	bytes = n = push_pipe(i, bytes, &idx, &off);
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->iov_offset = off + chunk;
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
	if (access_ok(VERIFY_WRITE, to, n)) {
		kasan_check_read(from, n);
		n = copy_to_user_mcsafe((__force void *) to, from, n);
	}
	return n;
}
static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
		const char *from, size_t len)
	to = kmap_atomic(page);
	ret = memcpy_mcsafe(to + offset, from, len);
static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off, xfer = 0;
	bytes = n = push_pipe(i, bytes, &idx, &off);
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
		i->iov_offset = off + chunk - rem;
/**
 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @iter: destination iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_to_iter() for protecting read/write to persistent memory.
 * Unless / until an architecture can guarantee identical performance
 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 * performance regression to switch more users to the mcsafe version.
 *
 * Otherwise, the main differences between this and a typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * See MCSAFE_TEST for self-test.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
	if (iter_is_iovec(i))
	iterate_and_advance(i, bytes, v,
		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
				(from += v.bv_len) - v.bv_len, v.bv_len);
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
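/*
 * Illustrative caller (a sketch, not part of this file): the pmem driver's
 * dax copy_to_iter callback funnels reads from persistent memory through the
 * mcsafe variant so that a poisoned cache line surfaces as a short copy
 * rather than a fatal machine check.  The function and parameter names below
 * only loosely mirror drivers/nvdimm/pmem.c and are assumptions for
 * illustration:
 *
 *	static size_t pmem_copy_to_iter(struct dax_device *dax_dev,
 *			pgoff_t pgoff, void *addr, size_t bytes,
 *			struct iov_iter *i)
 *	{
 *		return _copy_to_iter_mcsafe(addr, bytes, i);
 *	}
 */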
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	if (iter_is_iovec(i))
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
EXPORT_SYMBOL(_copy_from_iter);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	if (unlikely(i->count < bytes))
	if (iter_is_iovec(i))
	iterate_all_kinds(i, bytes, v, ({
		if (copyin((to += v.iov_len) - v.iov_len,
			   v.iov_base, v.iov_len))
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	iov_iter_advance(i, bytes);
EXPORT_SYMBOL(_copy_from_iter_full);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @iter: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif /* CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE */
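/*
 * Illustrative caller (a sketch, not part of this file): the dax/pmem write
 * path uses the flushcache variant so that data copied out of the iterator
 * becomes durable in persistent memory instead of sitting dirty in the CPU
 * cache.  The callback name below mirrors the pmem driver's dax_operations
 * hook only loosely and is an assumption for illustration:
 *
 *	static size_t pmem_copy_from_iter(struct dax_device *dax_dev,
 *			pgoff_t pgoff, void *addr, size_t bytes,
 *			struct iov_iter *i)
 *	{
 *		return _copy_from_iter_flushcache(addr, bytes, i);
 *	}
 */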
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
	if (unlikely(iov_iter_is_pipe(i))) {
	if (unlikely(i->count < bytes))
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	iov_iter_advance(i, bytes);
EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
	struct page *head = compound_head(page);
	size_t v = n + offset + page_address(page) - page_address(head);
	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
	if (unlikely(!page_copy_sane(page, offset, bytes)))
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
	} else if (unlikely(iov_iter_is_discard(i)))
	else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
		return copy_page_to_iter_pipe(page, offset, bytes, i);
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
	if (unlikely(!page_copy_sane(page, offset, bytes)))
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
	return copy_page_from_iter_iovec(page, offset, bytes, i);
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
	struct pipe_inode_info *pipe = i->pipe;
	bytes = n = push_pipe(i, bytes, &idx, &off);
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->iov_offset = off + chunk;
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	kunmap_atomic(kaddr);
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
static inline void pipe_truncate(struct iov_iter *i)
	struct pipe_inode_info *pipe = i->pipe;
		size_t off = i->iov_offset;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
static void pipe_advance(struct iov_iter *i, size_t size)
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
			idx = next_idx(idx, pipe);
		i->iov_offset = buf->offset + left;
	/* ... and discard everything past that point */
void iov_iter_advance(struct iov_iter *i, size_t size)
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
	if (unlikely(iov_iter_is_discard(i))) {
	iterate_and_advance(i, size, v, 0, 0, 0)
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
	if (WARN_ON(unroll > MAX_RW_COUNT))
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off = i->iov_offset;
			size_t n = off - pipe->bufs[idx].offset;
			if (!unroll && idx == i->start_idx) {
				idx = pipe->buffers - 1;
			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
		i->iov_offset = off;
	if (unlikely(iov_iter_is_discard(i)))
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
	unroll -= i->iov_offset;
	if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
			size_t n = (--bvec)->bv_len;
				i->iov_offset = n - unroll;
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
			size_t n = (--iov)->iov_len;
				i->iov_offset = n - unroll;
EXPORT_SYMBOL(iov_iter_revert);
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	if (unlikely(iov_iter_is_discard(i)))
		return i->count;
	else if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
	i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);
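/*
 * Usage sketch (illustrative only): consuming the next @count bytes of a
 * source without storing them anywhere.  Every copy_to_iter() or
 * copy_page_to_iter() against such an iterator "succeeds" and merely
 * advances the remaining count; read_into_iter() below stands in for a
 * producer and is hypothetical, not a real API:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, count);
 *	skipped = read_into_iter(src, &iter);
 */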
unsigned long iov_iter_alignment(const struct iov_iter *i)
	unsigned long res = 0;
	size_t size = i->count;
	if (unlikely(iov_iter_is_pipe(i))) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
	unsigned long res = 0;
	size_t size = i->count;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				struct page **pages,
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;
	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
	if (maxsize > i->count)
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
		if (unlikely(res < 0))
		return (res == n ? len : res * PAGE_SIZE) - *start;
	/* can't be more than PAGE_SIZE */
	*start = v.bv_offset;
	get_page(*pages = v.bv_page);
EXPORT_SYMBOL(iov_iter_get_pages);
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	n = __pipe_get_pages(i, maxsize, p, idx, start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
	if (maxsize > i->count)
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
		if (unlikely(res < 0)) {
		return (res == n ? len : res * PAGE_SIZE) - *start;
	/* can't be more than PAGE_SIZE */
	*start = v.bv_offset;
	*pages = p = get_pages_array(1);
	get_page(*p = v.bv_page);
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		sum = csum_block_add(sum, next, off);
		err ? v.iov_len : 0;
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
		sum = csum_block_add(sum, next, off);
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
		sum = csum_block_add(sum, next, off);
EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
	if (unlikely(i->count < bytes))
	iterate_all_kinds(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		sum = csum_block_add(sum, next, off);
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
		sum = csum_block_add(sum, next, off);
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
		sum = csum_block_add(sum, next, off);
	iov_iter_advance(i, bytes);
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_len, 0, &err);
		sum = csum_block_add(sum, next, off);
		err ? v.iov_len : 0;
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
		sum = csum_block_add(sum, next, off);
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
		sum = csum_block_add(sum, next, off);
EXPORT_SYMBOL(csum_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
	size_t size = i->count;
	if (unlikely(iov_iter_is_discard(i)))
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
		if (npages >= maxpages)
		if (npages >= maxpages)
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
		if (npages >= maxpages)
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
	if (unlikely(iov_iter_is_pipe(new))) {
	if (unlikely(iov_iter_is_discard(new)))
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
EXPORT_SYMBOL(dup_iter);
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
EXPORT_SYMBOL(import_iovec);
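/*
 * Typical caller pattern (a sketch mirroring do_readv()/do_writev();
 * do_the_io() stands in for the actual I/O and is hypothetical):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);
 *	kfree(iov);	// safe whether or not the on-stack array was used
 *	return ret;
 */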
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
#endif /* CONFIG_COMPAT */
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
	iterate_all_kinds(i, bytes, v, -EINVAL, ({
		w.iov_base = kmap(v.bv_page) + v.bv_offset;
		w.iov_len = v.bv_len;
		err = f(&w, context);
		err = f(&w, context);})
EXPORT_SYMBOL(iov_iter_for_each_range);