// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
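
/*
 * With DM_IO_MAX_REGIONS == BITS_PER_LONG (64 on a 64-bit build), the
 * alignment above guarantees the low six bits of a 'struct io' pointer
 * are zero, leaving room to OR in a region number of 0..63; see
 * store_io_and_region_in_bio() and retrieve_io_and_region_from_bio().
 */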

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned int min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
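
/*
 * Typical usage (illustrative only, not part of this file): a target
 * creates one client in its ctr and releases it in its dtr:
 *
 *	client = dm_io_client_create();
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */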

/*
 *-------------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *-------------------------------------------------------------------
 */
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned int region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned int *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
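
/*
 * Worked example of the encoding above (illustrative values): an io at a
 * 64-byte-aligned address ending in 0x9c0, doing I/O for region 5, stores
 * a value ending in 0x9c5 in bi_private; the two masks above recover the
 * pointer and the region number respectively.
 */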

/*
 *--------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *--------------------------------------------------------------
 */
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}
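
/*
 * Reference counting: io->count starts at 1 (the "holding" reference taken
 * in sync_io()/async_io()), do_region() takes one reference per bio it
 * submits, and dispatch_io() drops the initial reference once everything
 * has been issued, so complete_io() runs exactly once, after the last bio
 * ends.
 */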

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned int region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*
 *--------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *--------------------------------------------------------------
 */
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned int *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned int context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
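
/*
 * Four dpages back-ends follow: a page_list chain, a bio's bvec table,
 * a vmalloc'd buffer and a kmalloc'd buffer.  Each provides get_page()
 * to describe the current page/offset/length and next_page() to advance.
 */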

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned int *offset)
{
	unsigned int o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned int *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use bvec iterator to retrieve pages, so it is ok to
	 * access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned int *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned int *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 *---------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------
 */
static void do_region(const blk_opf_t opf, unsigned int region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned int offset;
	unsigned int num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;
	const enum req_op op = opf & REQ_OP_MASK;

	/*
	 * Reject unsupported discard and write zeroes requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
	    special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized-bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		default:
			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
						(PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
				       &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}
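
/*
 * Note that do_region() loops: whenever bio_add_page() cannot accept the
 * next page, or a discard/write-zeroes range exceeds the device limit,
 * the current bio is submitted and a fresh one is allocated, so a single
 * region may be carried by several bios, each holding its own reference
 * on the io.
 */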

static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		opf |= REQ_SYNC | REQ_IDLE;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (opf & REQ_PREFLUSH))
			do_region(opf, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, blk_opf_t opf,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_opf, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where,
			io_req->bi_opf, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
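
/*
 * Illustrative usage sketch (not part of this file): a caller doing a
 * synchronous read of one region into a kmalloc'd buffer typically fills
 * in a dm_io_request with notify.fn == NULL, e.g.:
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = start,
 *		.count  = nr_sectors,
 *	};
 *	struct dm_io_request req = {
 *		.bi_opf       = REQ_OP_READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn    = NULL,
 *		.client       = client,
 *	};
 *	int r = dm_io(&req, 1, &where, NULL);
 *
 * Supplying notify.fn and notify.context instead makes the same call
 * asynchronous.
 */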

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}