// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

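/*
 * State carried alongside a copied ("bounced") user mapping.  @is_our_pages
 * is set when the pages were allocated here and must be freed again by
 * bio_uncopy_user(), @is_null_mapped mirrors rq_map_data->null_mapped (no
 * data is copied back to user space on completion), and @iter/@iov hold a
 * deep copy of the caller's iterator.
 */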
struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

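/*
 * Allocate a bio_map_data with room for the iovec array and take a deep
 * copy of @data: the embedded iterator is re-pointed at the copied iovecs
 * so it stays valid even if the caller's array was on the stack or is
 * otherwise short-lived.
 */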
static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}

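/*
 * Bounce-buffer path: data is copied between user space and pages that are
 * either allocated here or supplied via @map_data, instead of pinning the
 * user pages.  Illustrative sizing example (not from the original source):
 * with a 4K PAGE_SIZE, offset = 512 and len = 8192 the copy spans
 * DIV_ROUND_UP(512 + 8192, 4096) = 3 pages, capped by bio_max_segs().
 */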
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;
	bio->bi_opf |= req_op(rq);

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

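/*
 * Zero-copy path: the user pages are pinned and added to the bio directly.
 * Illustrative example (an assumption about a typical configuration, not
 * from the original source): with the common queue_dma_alignment() mask of
 * 511, an iovec segment starting 512 bytes into a page (offs & 511 == 0)
 * can be mapped directly, while one starting at byte 513 trips the
 * alignment check below.
 */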
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
	if (!bio)
		return -ENOMEM;
	bio->bi_opf |= req_op(rq);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

 out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ret;
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

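/*
 * Illustrative use of blk_rq_append_bio() above (a sketch, not from the
 * original file; "rq" and "bio" are hypothetical locals).  On failure the
 * caller still owns the bio and must put it, as blk_rq_map_kern() below
 * does:
 *
 *	ret = blk_rq_append_bio(rq, bio);
 *	if (unlikely(ret))
 *		bio_put(bio);
 */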
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

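/*
 * Illustrative call sequence for blk_rq_map_user_iov() above (a sketch, not
 * from the original file; "uvec" and "nr_segs" are hypothetical caller
 * state).  blk_rq_map_user() below is the single-buffer wrapper.
 *
 *	struct iovec *iov = NULL;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);
 */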
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

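/*
 * Illustrative pairing of blk_rq_map_user() above with blk_rq_unmap_user()
 * (a sketch, not from the original file; "q", "rq", "ubuf" and "len" are
 * hypothetical and error handling is trimmed).  The bio pointer is saved
 * before submission because completion may change rq->bio.
 *
 *	struct bio *bio;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	bio = rq->bio;
 *	... submit the request and wait for it to complete ...
 *	blk_rq_unmap_user(bio);
 */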
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret))
		bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
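
/*
 * Illustrative use of blk_rq_map_kern() above (a sketch, not from the
 * original file; "q", "rq", "buf" and "len" are hypothetical).  Buffers on
 * the stack or failing blk_rq_aligned() are transparently bounced through
 * bio_copy_kern(); no explicit unmap step is needed, since the end_io
 * handlers defined above release the pages and the bio on completion.
 *
 *	ret = blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
 *	if (ret)
 *		return ret;
 *	... submit the request ...
 */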