Git Repo - linux.git/blobdiff - block/blk-map.c
Merge tag 'pci-v6.1-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
[linux.git] / block / blk-map.c
index 84b13a4158b7caaae8f2162d0fe7cff231966c85..34735626b00f3d125563cdd3799cc9756125d694 100644 (file)
@@ -241,17 +241,10 @@ static void blk_mq_map_bio_put(struct bio *bio)
        }
 }
 
-static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
-		gfp_t gfp_mask)
+/*
+ * blk_rq_map_bio_alloc - allocate a bio for mapping passthrough data
+ * @rq:		request the bio will serve
+ * @nr_vecs:	number of bio_vecs to reserve in the bio
+ * @gfp_mask:	allocation flags
+ *
+ * Factored out of bio_map_user_iov() so other mapping paths can share it.
+ * Polled requests allocate from fs_bio_set with REQ_ALLOC_CACHE set
+ * (presumably to use the bio alloc cache — matches blk_mq_map_bio_put()'s
+ * REQ_ALLOC_CACHE handling); all others use bio_kmalloc() + bio_init().
+ *
+ * Returns the initialized bio, or NULL on allocation failure.
+ */
+static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+		unsigned int nr_vecs, gfp_t gfp_mask)
 {
-	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
-	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
 	struct bio *bio;
-	int ret;
-	int j;
-
-	if (!iov_iter_count(iter))
-		return -EINVAL;
 
 	if (rq->cmd_flags & REQ_POLLED) {
 		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
@@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
 					&fs_bio_set);
 		if (!bio)
-			return -ENOMEM;
+			return NULL;
 	} else {
 		bio = bio_kmalloc(nr_vecs, gfp_mask);
 		if (!bio)
-			return -ENOMEM;
+			return NULL;
 		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
 	}
+	return bio;
+}
+
+static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+               gfp_t gfp_mask)
+{
+       unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+       unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
+       struct bio *bio;
+       int ret;
+       int j;
+
+       if (!iov_iter_count(iter))
+               return -EINVAL;
+
+       bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
+       if (bio == NULL)
+               return -ENOMEM;
 
        while (iov_iter_count(iter)) {
                struct page **pages, *stack_pages[UIO_FASTIOV];
@@ -537,6 +548,62 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_append_bio);
 
+/*
+ * blk_rq_map_user_bvec - prepare a passthrough request from an ITER_BVEC iter
+ * @rq:   request to map the data into
+ * @iter: ITER_BVEC iterator describing the caller's pages
+ *
+ * Points the bio directly at the iterator's bio_vec array (no copy, no
+ * per-page pinning here), then walks the segments validating them against
+ * the queue limits.
+ *
+ * Returns 0 on success, -ENOMEM on bio allocation failure, -EREMOTEIO when
+ * only the SG-gap (virt boundary) constraint fails — the caller treats that
+ * as "fall back to copying" — and -EINVAL for any other limit violation.
+ */
+static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+{
+	struct request_queue *q = rq->q;
+	size_t nr_iter = iov_iter_count(iter);
+	size_t nr_segs = iter->nr_segs;
+	struct bio_vec *bvecs, *bvprvp = NULL;
+	struct queue_limits *lim = &q->limits;
+	unsigned int nsegs = 0, bytes = 0;
+	struct bio *bio;
+	size_t i;
+
+	/* total byte count and segment count must fit the hardware limits */
+	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+		return -EINVAL;
+	if (nr_segs > queue_max_segments(q))
+		return -EINVAL;
+
+	/* no iovecs to alloc, as we already have a BVEC iterator */
+	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+	if (bio == NULL)
+		return -ENOMEM;
+
+	/*
+	 * Cast drops const: bio_iov_bvec_set() takes a non-const iter.
+	 * NOTE(review): assumes it does not mutate the caller's iterator —
+	 * confirm against bio_iov_bvec_set().
+	 */
+	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+	blk_rq_bio_prep(rq, bio, nr_segs);
+
+	/* loop to perform a bunch of sanity checks */
+	bvecs = (struct bio_vec *)iter->bvec;
+	for (i = 0; i < nr_segs; i++) {
+		struct bio_vec *bv = &bvecs[i];
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, fallback to copy.
+		 */
+		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+			blk_mq_map_bio_put(bio);
+			return -EREMOTEIO;
+		}
+		/*
+		 * check full condition
+		 * (nsegs tracks i in this loop, so the nsegs >= nr_segs
+		 * bound looks purely defensive; the bytes check guards
+		 * unsigned overflow of the running total)
+		 */
+		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+			goto put_bio;
+		if (bytes + bv->bv_len > nr_iter)
+			goto put_bio;
+		/* each segment must stay within a single page */
+		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
+			goto put_bio;
+
+		nsegs++;
+		bytes += bv->bv_len;
+		bvprvp = bv;
+	}
+	return 0;
+put_bio:
+	blk_mq_map_bio_put(bio);
+	return -EINVAL;
+}
+
 /**
  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
  * @q:         request queue where request should be inserted
@@ -556,24 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
 {
-       bool copy = false;
+       bool copy = false, map_bvec = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret = -EINVAL;
 
-       if (!iter_is_iovec(iter))
-               goto fail;
-
        if (map_data)
                copy = true;
        else if (blk_queue_may_bounce(q))
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
+       else if (iov_iter_is_bvec(iter))
+               map_bvec = true;
+       else if (!iter_is_iovec(iter))
+               copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
+       if (map_bvec) {
+               ret = blk_rq_map_user_bvec(rq, iter);
+               if (!ret)
+                       return 0;
+               if (ret != -EREMOTEIO)
+                       goto fail;
+               /* fall back to copying the data on limits mismatches */
+               copy = true;
+       }
+
        i = *iter;
        do {
                if (copy)
This page took 0.032819 seconds and 4 git commands to generate.