/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

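/*
 * Descriptive note (added for clarity, not in the original source): for
 * queues with a virtual boundary mask, report whether merging @cur after
 * @prv would create an SG gap, i.e. @prv does not end on the boundary or
 * @cur does not start on it. Callers fall back to a copy in that case.
 */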
static bool iovec_gap_to_prv(struct request_queue *q,
			     struct iovec *prv, struct iovec *cur)
{
	unsigned long prev_end;

	if (!queue_virt_boundary(q))
		return false;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		/* prv is not set - don't check */
		return false;

	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
		prev_end & queue_virt_boundary(q));
}

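/*
 * Descriptive note (added for clarity, not in the original source): append
 * @bio to @rq, starting the request's bio chain if it is empty, otherwise
 * back-merging onto the tail when the queue's merge limits allow it and
 * growing the request length accordingly.
 */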
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

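/*
 * Descriptive note (added for clarity, not in the original source): undo the
 * mapping of a single bio. User-mapped bios have their pages unpinned;
 * copied bios are written back to user space (for reads) and their bounce
 * pages freed.
 */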
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio *bio;
	int unaligned = 0;
	struct iov_iter i;
	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			unaligned = 1;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	if (bio->bi_iter.bi_size != iter->count) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
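
/*
 * Illustrative sketch (assumed caller context, not part of this file): a
 * passthrough handler that receives a user iovec array would typically
 * build the iov_iter with import_iovec() and hand it to
 * blk_rq_map_user_iov(); uvec, nr_segs and rq are assumed to come from
 * the caller:
 *
 *	struct iovec *iov = NULL;
 *	struct iov_iter i;
 *	int ret;
 *
 *	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
 *	if (ret < 0)
 *		return ret;
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 *	kfree(iov);
 */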

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
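
/*
 * Illustrative sketch (assumed caller context, not part of this file): a
 * REQ_TYPE_BLOCK_PC user such as an ioctl handler would typically map the
 * user buffer right after allocating the request; the direction passed to
 * blk_get_request() is WRITE or READ to match the transfer:
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_rq_set_block_pc(rq);
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 */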

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
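
/*
 * Illustrative sketch (assumed caller context, not part of this file):
 * because completion may advance or replace rq->bio, callers save the bio
 * pointer produced by the mapping before executing the request and pass
 * that saved pointer back in here:
 *
 *	bio = rq->bio;
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *	if (blk_rq_unmap_user(bio))
 *		ret = -EFAULT;
 *	blk_put_request(rq);
 */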

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
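
/*
 * Illustrative sketch (assumed caller context, not part of this file): a
 * driver issuing a command with a kernel buffer maps it the same way,
 * typically with GFP_NOIO from the I/O path; note that a stack buffer is
 * silently bounced through a copy rather than mapped directly:
 *
 *	rq = blk_get_request(q, READ, GFP_NOIO);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_rq_set_block_pc(rq);
 *
 *	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_NOIO);
 *	if (!ret)
 *		blk_execute_rq(q, bd_disk, rq, 0);
 *	blk_put_request(rq);
 */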