]> Git Repo - linux.git/blobdiff - block/blk-map.c
Merge tag 'pci-v6.1-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
[linux.git] / block / blk-map.c
index 7196a6b64c80271e60a4917e95204da6b283b7cc..34735626b00f3d125563cdd3799cc9756125d694 100644 (file)
@@ -158,7 +158,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
        bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
 
        if (map_data) {
-               nr_pages = 1 << map_data->page_order;
+               nr_pages = 1U << map_data->page_order;
                i = map_data->offset / PAGE_SIZE;
        }
        while (len) {
@@ -231,6 +231,37 @@ out_bmd:
        return ret;
 }
 
+/*
+ * Release a bio obtained from blk_rq_map_bio_alloc().
+ *
+ * Bios for polled requests come from the fs_bio_set percpu cache (flagged
+ * REQ_ALLOC_CACHE) and must go back through bio_put(); everything else was
+ * bio_kmalloc()ed and is torn down manually with bio_uninit() + kfree().
+ */
+static void blk_mq_map_bio_put(struct bio *bio)
+{
+       if (bio->bi_opf & REQ_ALLOC_CACHE) {
+               bio_put(bio);
+       } else {
+               bio_uninit(bio);
+               kfree(bio);
+       }
+}
+
+/*
+ * Allocate a bio suitable for mapping user memory onto @rq.
+ *
+ * Polled requests (REQ_POLLED) allocate from the fs_bio_set cache with
+ * REQ_ALLOC_CACHE set so completion can recycle the bio without taking
+ * locks; all other requests use a plain bio_kmalloc() + bio_init().
+ * Either way the result must be freed with blk_mq_map_bio_put(), which
+ * keys off REQ_ALLOC_CACHE to pick the matching release path.
+ *
+ * Returns the bio, or NULL on allocation failure.
+ */
+static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+               unsigned int nr_vecs, gfp_t gfp_mask)
+{
+       struct bio *bio;
+
+       if (rq->cmd_flags & REQ_POLLED) {
+               blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
+
+               bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
+                                       &fs_bio_set);
+               if (!bio)
+                       return NULL;
+       } else {
+               bio = bio_kmalloc(nr_vecs, gfp_mask);
+               if (!bio)
+                       return NULL;
+               bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
+       }
+       return bio;
+}
+
 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
                gfp_t gfp_mask)
 {
@@ -243,18 +274,24 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
        if (!iov_iter_count(iter))
                return -EINVAL;
 
-       bio = bio_kmalloc(nr_vecs, gfp_mask);
-       if (!bio)
+       bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
+       if (bio == NULL)
                return -ENOMEM;
-       bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
 
        while (iov_iter_count(iter)) {
-               struct page **pages;
+               struct page **pages, *stack_pages[UIO_FASTIOV];
                ssize_t bytes;
-               size_t offs, added = 0;
+               size_t offs;
                int npages;
 
-               bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
+               if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
+                       pages = stack_pages;
+                       bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
+                                                       nr_vecs, &offs);
+               } else {
+                       bytes = iov_iter_get_pages_alloc2(iter, &pages,
+                                                       LONG_MAX, &offs);
+               }
                if (unlikely(bytes <= 0)) {
                        ret = bytes ? bytes : -EFAULT;
                        goto out_unmap;
@@ -280,7 +317,6 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
                                        break;
                                }
 
-                               added += n;
                                bytes -= n;
                                offs = 0;
                        }
@@ -290,7 +326,8 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
                 */
                while (j < npages)
                        put_page(pages[j++]);
-               kvfree(pages);
+               if (pages != stack_pages)
+                       kvfree(pages);
                /* couldn't stuff something into bio? */
                if (bytes) {
                        iov_iter_revert(iter, bytes);
@@ -305,8 +342,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 
  out_unmap:
        bio_release_pages(bio, false);
-       bio_uninit(bio);
-       kfree(bio);
+       blk_mq_map_bio_put(bio);
        return ret;
 }
 
@@ -512,6 +548,62 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_append_bio);
 
+/*
+ * Prepare bio for passthrough IO given an ITER_BVEC iter. The bvecs are
+ * reused directly instead of copying, so the caller's iter must stay valid
+ * for the life of the request.
+ *
+ * Returns 0 on success, -EREMOTEIO when a SG gap forces the caller to fall
+ * back to the copy path, -EINVAL for size/segment violations, -ENOMEM on
+ * bio allocation failure.
+ */
+static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+{
+       struct request_queue *q = rq->q;
+       size_t nr_iter = iov_iter_count(iter);
+       size_t nr_segs = iter->nr_segs;
+       struct bio_vec *bvecs, *bvprvp = NULL;
+       struct queue_limits *lim = &q->limits;
+       unsigned int nsegs = 0, bytes = 0;
+       struct bio *bio;
+       size_t i;
+
+       if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+               return -EINVAL;
+       if (nr_segs > queue_max_segments(q))
+               return -EINVAL;
+
+       /*
+        * Sanity-check the bvecs *before* touching the request: once
+        * blk_rq_bio_prep() runs, rq->bio points at our bio, and freeing
+        * it on an error path would leave rq with a dangling pointer.
+        */
+       bvecs = (struct bio_vec *)iter->bvec;
+       for (i = 0; i < nr_segs; i++) {
+               struct bio_vec *bv = &bvecs[i];
+
+               /*
+                * If the queue doesn't support SG gaps and adding this
+                * offset would create a gap, fallback to copy.
+                */
+               if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset))
+                       return -EREMOTEIO;
+               /* check full condition */
+               if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+                       return -EINVAL;
+               if (bytes + bv->bv_len > nr_iter)
+                       return -EINVAL;
+               if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
+                       return -EINVAL;
+
+               nsegs++;
+               bytes += bv->bv_len;
+               bvprvp = bv;
+       }
+
+       /* no iovecs to alloc, as we already have a BVEC iterator */
+       bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+       if (bio == NULL)
+               return -ENOMEM;
+
+       bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+       /* install the validated segment count, not the raw iter count */
+       blk_rq_bio_prep(rq, bio, nsegs);
+       return 0;
+}
+
 /**
  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
  * @q:         request queue where request should be inserted
@@ -531,24 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
 {
-       bool copy = false;
+       bool copy = false, map_bvec = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret = -EINVAL;
 
-       if (!iter_is_iovec(iter))
-               goto fail;
-
        if (map_data)
                copy = true;
        else if (blk_queue_may_bounce(q))
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
+       else if (iov_iter_is_bvec(iter))
+               map_bvec = true;
+       else if (!iter_is_iovec(iter))
+               copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
+       if (map_bvec) {
+               ret = blk_rq_map_user_bvec(rq, iter);
+               if (!ret)
+                       return 0;
+               if (ret != -EREMOTEIO)
+                       goto fail;
+               /* fall back to copying the data on limits mismatches */
+               copy = true;
+       }
+
        i = *iter;
        do {
                if (copy)
@@ -586,6 +689,42 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_user);
 
+/**
+ * blk_rq_map_user_io - map user data to a request, iovec or flat buffer
+ * @req: request to map data to
+ * @map_data: pointer to the rq_map_data holding pages (if necessary)
+ * @ubuf: user buffer, or user iovec array when @vec is true
+ * @buf_len: total byte length (iovec count when @vec && !@iov_count)
+ * @gfp_mask: memory allocation flags
+ * @vec: true if @ubuf is a userspace iovec array rather than a flat buffer
+ * @iov_count: number of iovec entries; 0 means @buf_len carries the count
+ * @check_iter_count: reject a zero-length iterator after truncation
+ * @rw: %READ or %WRITE direction for import_iovec()
+ *
+ * Convenience wrapper used by passthrough submitters: imports the user
+ * iovec (bounded by UIO_FASTIOV on-stack entries before falling back to a
+ * heap copy, which is freed here) and hands off to blk_rq_map_user_iov(),
+ * or maps the flat buffer via blk_rq_map_user(). Per the SG_IO howto, when
+ * both an iovec and @buf_len are given the shorter of the two wins.
+ */
+int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
+               void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
+               bool vec, int iov_count, bool check_iter_count, int rw)
+{
+       int ret = 0;
+
+       if (vec) {
+               struct iovec fast_iov[UIO_FASTIOV];
+               struct iovec *iov = fast_iov;
+               struct iov_iter iter;
+
+               ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
+                               UIO_FASTIOV, &iov, &iter);
+               if (ret < 0)
+                       return ret;
+
+               if (iov_count) {
+                       /* SG_IO howto says that the shorter of the two wins */
+                       iov_iter_truncate(&iter, buf_len);
+                       if (check_iter_count && !iov_iter_count(&iter)) {
+                               kfree(iov);
+                               return -EINVAL;
+                       }
+               }
+
+               ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
+                               gfp_mask);
+               kfree(iov);
+       } else if (buf_len) {
+               ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
+                               gfp_mask);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(blk_rq_map_user_io);
+
 /**
  * blk_rq_unmap_user - unmap a request with user data
  * @bio:              start of bio list
@@ -611,8 +750,7 @@ int blk_rq_unmap_user(struct bio *bio)
 
                next_bio = bio;
                bio = bio->bi_next;
-               bio_uninit(next_bio);
-               kfree(next_bio);
+               blk_mq_map_bio_put(next_bio);
        }
 
        return ret;
This page took 0.039384 seconds and 4 git commands to generate.