// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-prison-v2.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"
#include "dm-io-tracker.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

/*
 * Represents a chunk of future work.  'input' allows continuations to pass
 * values between themselves, typically error values.
 */
struct continuation {
	struct work_struct ws;
	blk_status_t input;
};

static inline void init_continuation(struct continuation *k,
				     void (*fn)(struct work_struct *))
{
	INIT_WORK(&k->ws, fn);
	k->input = 0;
}

static inline void queue_continuation(struct workqueue_struct *wq,
				      struct continuation *k)
{
	queue_work(wq, &k->ws);
}
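
/*
 * Illustrative sketch (not part of the driver): a continuation is usually
 * embedded in a larger struct; its callback recovers the container and
 * checks 'input' for an error handed on by the previous stage.  The names
 * 'struct my_op' and 'my_op_done' are hypothetical.
 *
 *	struct my_op {
 *		struct continuation k;
 *	};
 *
 *	static void my_op_done(struct work_struct *ws)
 *	{
 *		struct continuation *k = container_of(ws, struct continuation, ws);
 *		struct my_op *op = container_of(k, struct my_op, k);
 *
 *		if (k->input)
 *			return;	// earlier stage reported a blk_status_t error
 *	}
 *
 *	init_continuation(&op->k, my_op_done);
 *	queue_continuation(wq, &op->k);
 */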
/*----------------------------------------------------------------*/

/*
 * The batcher collects together pieces of work that need a particular
 * operation to occur before they can proceed (typically a commit).
 */
struct batcher {
	/*
	 * The operation that everyone is waiting for.
	 */
	blk_status_t (*commit_op)(void *context);
	void *commit_context;

	/*
	 * This is how bios should be issued once the commit op is complete
	 * (accounted_request).
	 */
	void (*issue_op)(struct bio *bio, void *context);
	void *issue_context;

	/*
	 * Queued work gets put on here after commit.
	 */
	struct workqueue_struct *wq;

	spinlock_t lock;
	struct list_head work_items;
	struct bio_list bios;
	struct work_struct commit_work;
	bool commit_scheduled;
};

static void __commit(struct work_struct *_ws)
{
	struct batcher *b = container_of(_ws, struct batcher, commit_work);
	blk_status_t r;
	struct list_head work_items;
	struct work_struct *ws, *tmp;
	struct continuation *k;
	struct bio *bio;
	struct bio_list bios;

	INIT_LIST_HEAD(&work_items);
	bio_list_init(&bios);

	/*
	 * We have to grab these before the commit_op to avoid a race
	 * condition.
	 */
	spin_lock_irq(&b->lock);
	list_splice_init(&b->work_items, &work_items);
	bio_list_merge(&bios, &b->bios);
	bio_list_init(&b->bios);
	b->commit_scheduled = false;
	spin_unlock_irq(&b->lock);

	r = b->commit_op(b->commit_context);

	list_for_each_entry_safe(ws, tmp, &work_items, entry) {
		k = container_of(ws, struct continuation, ws);
		k->input = r;
		INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
		queue_work(b->wq, ws);
	}

	while ((bio = bio_list_pop(&bios))) {
		if (r) {
			bio->bi_status = r;
			bio_endio(bio);
		} else
			b->issue_op(bio, b->issue_context);
	}
}

static void batcher_init(struct batcher *b,
			 blk_status_t (*commit_op)(void *),
			 void *commit_context,
			 void (*issue_op)(struct bio *bio, void *),
			 void *issue_context,
			 struct workqueue_struct *wq)
{
	b->commit_op = commit_op;
	b->commit_context = commit_context;
	b->issue_op = issue_op;
	b->issue_context = issue_context;
	b->wq = wq;

	spin_lock_init(&b->lock);
	INIT_LIST_HEAD(&b->work_items);
	bio_list_init(&b->bios);
	INIT_WORK(&b->commit_work, __commit);
	b->commit_scheduled = false;
}

static void async_commit(struct batcher *b)
{
	queue_work(b->wq, &b->commit_work);
}

static void continue_after_commit(struct batcher *b, struct continuation *k)
{
	bool commit_scheduled;

	spin_lock_irq(&b->lock);
	commit_scheduled = b->commit_scheduled;
	list_add_tail(&k->ws.entry, &b->work_items);
	spin_unlock_irq(&b->lock);

	if (commit_scheduled)
		async_commit(b);
}

/*
 * Bios are errored if commit failed.
 */
static void issue_after_commit(struct batcher *b, struct bio *bio)
{
	bool commit_scheduled;

	spin_lock_irq(&b->lock);
	commit_scheduled = b->commit_scheduled;
	bio_list_add(&b->bios, bio);
	spin_unlock_irq(&b->lock);

	if (commit_scheduled)
		async_commit(b);
}

/*
 * Call this if some urgent work is waiting for the commit to complete.
 */
static void schedule_commit(struct batcher *b)
{
	bool immediate;

	spin_lock_irq(&b->lock);
	immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
	b->commit_scheduled = true;
	spin_unlock_irq(&b->lock);

	if (immediate)
		async_commit(b);
}
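
/*
 * A sketch of how this file wires a batcher up (it mirrors the real calls
 * made further down): queued work and bios are held back until a single
 * shared commit has gone through.
 *
 *	batcher_init(&cache->committer, commit_op, cache,
 *		     issue_op, cache, cache->wq);
 *
 *	// work that must not run before the commit:
 *	init_continuation(&mg->k, mg_success);
 *	continue_after_commit(&cache->committer, &mg->k);
 *	schedule_commit(&cache->committer);
 *
 *	// a bio that must not be issued before the commit:
 *	issue_after_commit(&cache->committer, bio);
 */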
/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function.  We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
	bio_end_io_t *bi_end_io;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
}
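
/*
 * Example of the pattern, following this file's own use in overwrite() and
 * overwrite_endio(); 'my_endio' is a hypothetical name:
 *
 *	dm_hook_bio(&pb->hook_info, bio, my_endio, context);
 *	... submit the bio ...
 *
 *	static void my_endio(struct bio *bio)
 *	{
 *		dm_unhook_bio(&pb->hook_info, bio);
 *		// do the extra work, then complete or re-queue the bio
 *	}
 */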
/*----------------------------------------------------------------*/

#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

enum cache_metadata_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
	CM_FAIL
};

enum cache_io_mode {
	/*
	 * Data is written to cached blocks only.  These blocks are marked
	 * dirty.  If you lose the cache device you will lose data.
	 * Potential performance increase for both reads and writes.
	 */
	CM_IO_WRITEBACK,

	/*
	 * Data is written to both cache and origin.  Blocks are never
	 * dirty.  Potential performance benefit for reads only.
	 */
	CM_IO_WRITETHROUGH,

	/*
	 * A degraded mode useful for various cache coherency situations
	 * (eg, rolling back snapshots).  Reads and writes always go to the
	 * origin.  If a write goes to a cached oblock, then the cache
	 * block is invalidated.
	 */
	CM_IO_PASSTHROUGH
};

struct cache_features {
	enum cache_metadata_mode mode;
	enum cache_io_mode io_mode;
	unsigned int metadata_version;
	bool discard_passdown:1;
};

struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t writeback;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

struct cache {
	struct dm_target *ti;
	spinlock_t lock;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	int sectors_per_block_shift;
	sector_t sectors_per_block;

	struct dm_cache_metadata *cmd;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices.  Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices.  Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Invalidation fields.
	 */
	spinlock_t invalidation_lock;
	struct list_head invalidation_requests;

	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	atomic_t nr_allocated_migrations;

	/*
	 * The number of in flight migrations that are performing
	 * background io.  eg, promotion, writeback.
	 */
	atomic_t nr_io_migrations;

	struct bio_list deferred_bios;

	struct rw_semaphore quiesce_lock;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;
	uint32_t discard_block_size; /* a power of 2 times sectors per block */

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned int nr_ctr_args;
	const char **ctr_args;

	struct dm_kcopyd_client *copier;
	struct work_struct deferred_bio_worker;
	struct work_struct migration_worker;
	struct workqueue_struct *wq;
	struct delayed_work waker;
	struct dm_bio_prison_v2 *prison;

	/*
	 * cache_size entries, dirty if set
	 */
	unsigned long *dirty_bitset;
	atomic_t nr_dirty;

	unsigned int policy_nr_args;
	struct dm_cache_policy *policy;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;

	bool need_tick_bio:1;
	bool sized:1;
	bool invalidate:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	struct rw_semaphore background_work_lock;

	struct batcher committer;
	struct work_struct commit_ws;

	struct dm_io_tracker tracker;

	mempool_t migration_pool;

	struct bio_set bs;
};

struct per_bio_data {
	bool tick:1;
	unsigned int req_nr:2;
	struct dm_bio_prison_cell_v2 *cell;
	struct dm_hook_info hook_info;
	sector_t len;
};

struct dm_cache_migration {
	struct continuation k;
	struct cache *cache;

	struct policy_work *op;
	struct bio *overwrite_bio;
	struct dm_bio_prison_cell_v2 *cell;

	dm_cblock_t invalidate_cblock;
	dm_oblock_t invalidate_oblock;
};

/*----------------------------------------------------------------*/

static bool writethrough_mode(struct cache *cache)
{
	return cache->features.io_mode == CM_IO_WRITETHROUGH;
}

static bool writeback_mode(struct cache *cache)
{
	return cache->features.io_mode == CM_IO_WRITEBACK;
}

static inline bool passthrough_mode(struct cache *cache)
{
	return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
}

/*----------------------------------------------------------------*/

static void wake_deferred_bio_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->deferred_bio_worker);
}

static void wake_migration_worker(struct cache *cache)
{
	if (passthrough_mode(cache))
		return;

	queue_work(cache->wq, &cache->migration_worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
{
	return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
{
	dm_bio_prison_free_cell_v2(cache->prison, cell);
}

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);

	memset(mg, 0, sizeof(*mg));

	mg->cache = cache;
	atomic_inc(&cache->nr_allocated_migrations);

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
		wake_up(&cache->migration_wait);

	mempool_free(mg, &cache->migration_pool);
}

/*----------------------------------------------------------------*/

static inline dm_oblock_t oblock_succ(dm_oblock_t b)
{
	return to_oblock(from_oblock(b) + 1ull);
}

static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
{
	key->block_begin = from_oblock(begin);
	key->block_end = from_oblock(end);
}

/*
 * We have two lock levels.  Level 0, which is used to prevent WRITEs, and
 * level 1 which prevents *both* READs and WRITEs.
 */
#define WRITE_LOCK_LEVEL 0
#define READ_WRITE_LOCK_LEVEL 1

static unsigned int lock_level(struct bio *bio)
{
	return bio_data_dir(bio) == WRITE ?
		WRITE_LOCK_LEVEL :
		READ_WRITE_LOCK_LEVEL;
}
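
/*
 * Loosely speaking (a reading of the level scheme above, not a statement of
 * the prison v2 internals): a WRITE takes its shared lock at level 0 and so
 * is held back by an exclusive lock at either level, whereas a READ takes
 * its shared lock at level 1 and only blocks once a migration has upgraded
 * to the level 1 exclusive lock.
 */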
/*
 *--------------------------------------------------------------
 * Per-bio data
 *--------------------------------------------------------------
 */

static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->cell = NULL;
	pb->len = 0;

	return pb;
}

/*----------------------------------------------------------------*/

static void defer_bio(struct cache *cache, struct bio *bio)
{
	spin_lock_irq(&cache->lock);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irq(&cache->lock);

	wake_deferred_bio_worker(cache);
}

static void defer_bios(struct cache *cache, struct bio_list *bios)
{
	spin_lock_irq(&cache->lock);
	bio_list_merge(&cache->deferred_bios, bios);
	bio_list_init(bios);
	spin_unlock_irq(&cache->lock);

	wake_deferred_bio_worker(cache);
}

/*----------------------------------------------------------------*/

static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
{
	bool r;
	struct per_bio_data *pb;
	struct dm_cell_key_v2 key;
	dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
	struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;

	cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */

	build_key(oblock, end, &key);
	r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
	if (!r) {
		/*
		 * Failed to get the lock.
		 */
		free_prison_cell(cache, cell_prealloc);
		return r;
	}

	if (cell != cell_prealloc)
		free_prison_cell(cache, cell_prealloc);

	pb = get_per_bio_data(bio);
	pb->cell = cell;

	return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

static void set_dirty(struct cache *cache, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		atomic_inc(&cache->nr_dirty);
		policy_set_dirty(cache->policy, cblock);
	}
}

/*
 * These two are called after migrations, to force the policy and the dirty
 * bitset to be in sync.
 */
static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))
		atomic_inc(&cache->nr_dirty);
	policy_set_dirty(cache->policy, cblock);
}

static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		if (atomic_dec_return(&cache->nr_dirty) == 0)
			dm_table_event(cache->ti->table);
	}

	policy_clear_dirty(cache->policy, cblock);
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}

static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

static dm_block_t oblocks_per_dblock(struct cache *cache)
{
	dm_block_t oblocks = cache->discard_block_size;

	if (block_size_is_power_of_two(cache))
		oblocks >>= cache->sectors_per_block_shift;
	else
		oblocks = block_div(oblocks, cache->sectors_per_block);

	return oblocks;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	return to_dblock(block_div(from_oblock(oblock),
				   oblocks_per_dblock(cache)));
}
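
/*
 * Worked example with illustrative numbers: sectors_per_block = 64 and
 * discard_block_size = 512 sectors gives oblocks_per_dblock() = 512/64 = 8,
 * so origin block 20 belongs to discard block 20/8 = 2.
 */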
static void set_discard(struct cache *cache, dm_dblock_t b)
{
	BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
	atomic_inc(&cache->stats.discard_count);

	spin_lock_irq(&cache->lock);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irq(&cache->lock);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	spin_lock_irq(&cache->lock);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irq(&cache->lock);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;

	spin_lock_irq(&cache->lock);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irq(&cache->lock);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;

	spin_lock_irq(&cache->lock);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irq(&cache->lock);

	return r;
}

/*
 * -------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------
 */

static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio_set_dev(bio, cache->origin_dev->bdev);
}

static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t block = from_cblock(cblock);

	bio_set_dev(bio, cache->cache_dev->bdev);
	if (!block_size_is_power_of_two(cache))
		bio->bi_iter.bi_sector =
			(block * cache->sectors_per_block) +
			sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_iter.bi_sector =
			(block << cache->sectors_per_block_shift) |
			(bi_sector & (cache->sectors_per_block - 1));
}
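
/*
 * For example, with sectors_per_block = 64 (shift 6): a bio at sector 70
 * remapped to cblock 3 lands at (3 << 6) | (70 & 63) = 192 + 6 = sector 198
 * on the cache device.  The non-power-of-two branch computes the same
 * block/offset split with a multiply and sector_div().
 */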
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb;

	spin_lock_irq(&cache->lock);
	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
	    bio_op(bio) != REQ_OP_DISCARD) {
		pb = get_per_bio_data(bio);
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irq(&cache->lock);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	// FIXME: check_if_tick_bio_needed() is called way too much through this interface
	check_if_tick_bio_needed(cache, bio);

	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	check_if_tick_bio_needed(cache, bio);

	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}

static bool accountable_bio(struct cache *cache, struct bio *bio)
{
	return bio_op(bio) != REQ_OP_DISCARD;
}

static void accounted_begin(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb;

	if (accountable_bio(cache, bio)) {
		pb = get_per_bio_data(bio);
		pb->len = bio_sectors(bio);
		dm_iot_io_begin(&cache->tracker, pb->len);
	}
}

static void accounted_complete(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio);

	dm_iot_io_end(&cache->tracker, pb->len);
}

static void accounted_request(struct cache *cache, struct bio *bio)
{
	accounted_begin(cache, bio);
	dm_submit_bio_remap(bio, NULL);
}

static void issue_op(struct bio *bio, void *context)
{
	struct cache *cache = context;

	accounted_request(cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices.  Clone the bio and send the two
 * copies in parallel.
 */
static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
				      dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
						 GFP_NOIO, &cache->bs);

	bio_chain(origin_bio, bio);

	if (bio_data_dir(origin_bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	submit_bio(origin_bio);

	remap_to_cache(cache, bio, cblock);
}

/*
 *--------------------------------------------------------------
 * Failure modes
 *--------------------------------------------------------------
 */

static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
	return cache->features.mode;
}

static const char *cache_device_name(struct cache *cache)
{
	return dm_table_device_name(cache->ti->table);
}

static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{
	static const char *descs[] = {
		"write",
		"read-only",
		"fail"
	};

	dm_table_event(cache->ti->table);
	DMINFO("%s: switching cache to %s mode",
	       cache_device_name(cache), descs[(int)mode]);
}

static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
	bool needs_check;
	enum cache_metadata_mode old_mode = get_cache_mode(cache);

	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
		DMERR("%s: unable to read needs_check flag, setting failure mode.",
		      cache_device_name(cache));
		new_mode = CM_FAIL;
	}

	if (new_mode == CM_WRITE && needs_check) {
		DMERR("%s: unable to switch cache to write mode until repaired.",
		      cache_device_name(cache));
		if (old_mode != new_mode)
			new_mode = old_mode;
		else
			new_mode = CM_READ_ONLY;
	}

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_cache_metadata_set_read_only(cache->cmd);
		break;

	case CM_WRITE:
		dm_cache_metadata_set_read_write(cache->cmd);
		break;
	}

	cache->features.mode = new_mode;

	if (new_mode != old_mode)
		notify_mode_switch(cache, new_mode);
}

static void abort_transaction(struct cache *cache)
{
	const char *dev_name = cache_device_name(cache);

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
	if (dm_cache_metadata_abort(cache->cmd)) {
		DMERR("%s: failed to abort metadata transaction", dev_name);
		set_cache_mode(cache, CM_FAIL);
	}

	if (dm_cache_metadata_set_needs_check(cache->cmd)) {
		DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
		set_cache_mode(cache, CM_FAIL);
	}
}

static void metadata_operation_failed(struct cache *cache, const char *op, int r)
{
	DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
		    cache_device_name(cache), op, r);
	abort_transaction(cache);
	set_cache_mode(cache, CM_READ_ONLY);
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

static void update_stats(struct cache_stats *stats, enum policy_operation op)
{
	switch (op) {
	case POLICY_PROMOTE:
		atomic_inc(&stats->promotion);
		break;

	case POLICY_DEMOTE:
		atomic_inc(&stats->demotion);
		break;

	case POLICY_WRITEBACK:
		atomic_inc(&stats->writeback);
		break;
	}
}

/*
 *---------------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *---------------------------------------------------------------------
 */

static void inc_io_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_io_migrations);
}

static void dec_io_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_io_migrations);
}

static bool discard_or_flush(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
}

static void calc_discard_block_range(struct cache *cache, struct bio *bio,
				     dm_dblock_t *b, dm_dblock_t *e)
{
	sector_t sb = bio->bi_iter.bi_sector;
	sector_t se = bio_end_sector(bio);

	*b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));

	if (se - sb < cache->discard_block_size)
		*e = *b;
	else
		*e = to_dblock(block_div(se, cache->discard_block_size));
}
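
/*
 * E.g. with discard_block_size = 128: a discard covering sectors [100, 500)
 * rounds the start up to block 1 (sector 128) and the end down to block 3
 * (sector 384), so only blocks [1, 3) get marked -- a partially covered
 * discard block is never marked.
 */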
/*----------------------------------------------------------------*/

static void prevent_background_work(struct cache *cache)
{
	down_write(&cache->background_work_lock);
}

static void allow_background_work(struct cache *cache)
{
	up_write(&cache->background_work_lock);
}

static bool background_work_begin(struct cache *cache)
{
	bool r;

	r = down_read_trylock(&cache->background_work_lock);

	return r;
}

static void background_work_end(struct cache *cache)
{
	up_read(&cache->background_work_lock);
}

/*----------------------------------------------------------------*/

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
	       (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}

static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
{
	return writeback_mode(cache) &&
	       (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
}

static void quiesce(struct dm_cache_migration *mg,
		    void (*continuation)(struct work_struct *))
{
	init_continuation(&mg->k, continuation);
	dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
}

static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
{
	struct continuation *k = container_of(ws, struct continuation, ws);

	return container_of(k, struct dm_cache_migration, k);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);

	if (read_err || write_err)
		mg->k.input = BLK_STS_IOERR;

	queue_continuation(mg->cache->wq, &mg->k);
}

static void copy(struct dm_cache_migration *mg, bool promote)
{
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;

	o_region.bdev = cache->origin_dev->bdev;
	o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (promote)
		dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
	else
		dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
}

static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio);

	if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
		free_prison_cell(cache, pb->cell);
	pb->cell = NULL;
}

static void overwrite_endio(struct bio *bio)
{
	struct dm_cache_migration *mg = bio->bi_private;
	struct cache *cache = mg->cache;
	struct per_bio_data *pb = get_per_bio_data(bio);

	dm_unhook_bio(&pb->hook_info, bio);

	if (bio->bi_status)
		mg->k.input = bio->bi_status;

	queue_continuation(cache->wq, &mg->k);
}

static void overwrite(struct dm_cache_migration *mg,
		      void (*continuation)(struct work_struct *))
{
	struct bio *bio = mg->overwrite_bio;
	struct per_bio_data *pb = get_per_bio_data(bio);

	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);

	/*
	 * The overwrite bio is part of the copy operation, as such it does
	 * not set/clear discard or dirty flags.
	 */
	if (mg->op->op == POLICY_PROMOTE)
		remap_to_cache(mg->cache, bio, mg->op->cblock);
	else
		remap_to_origin(mg->cache, bio);

	init_continuation(&mg->k, continuation);
	accounted_request(mg->cache, bio);
}

/*
 * Migration steps:
 *
 * 1) exclusive lock preventing WRITEs
 * 2) quiesce
 * 3) copy or issue overwrite bio
 * 4) upgrade to exclusive lock preventing READs and WRITEs
 * 5) quiesce
 * 6) update metadata and commit
 */
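
/*
 * The steps above are strung together as continuations rather than a single
 * thread of control; roughly (leaving the overwrite optimisation aside):
 *
 *	mg_start()
 *	  -> mg_lock_writes()		// step 1
 *	  -> quiesce(mg, mg_copy)	// step 2
 *	  -> mg_full_copy()		// step 3, via kcopyd
 *	  -> mg_upgrade_lock()		// steps 4 and 5
 *	  -> mg_update_metadata()	// step 6
 *	  -> mg_complete()
 */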
static void mg_complete(struct dm_cache_migration *mg, bool success)
{
	struct bio_list bios;
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;
	dm_cblock_t cblock = op->cblock;

	if (success)
		update_stats(&cache->stats, op->op);

	switch (op->op) {
	case POLICY_PROMOTE:
		clear_discard(cache, oblock_to_dblock(cache, op->oblock));
		policy_complete_background_work(cache->policy, op, success);

		if (mg->overwrite_bio) {
			if (success)
				force_set_dirty(cache, cblock);
			else if (mg->k.input)
				mg->overwrite_bio->bi_status = mg->k.input;
			else
				mg->overwrite_bio->bi_status = BLK_STS_IOERR;

			bio_endio(mg->overwrite_bio);
		} else {
			if (success)
				force_clear_dirty(cache, cblock);
			dec_io_migrations(cache);
		}
		break;

	case POLICY_DEMOTE:
		/*
		 * We clear dirty here to update the nr_dirty counter.
		 */
		if (success)
			force_clear_dirty(cache, cblock);
		policy_complete_background_work(cache->policy, op, success);
		dec_io_migrations(cache);
		break;

	case POLICY_WRITEBACK:
		if (success)
			force_clear_dirty(cache, cblock);
		policy_complete_background_work(cache->policy, op, success);
		dec_io_migrations(cache);
		break;
	}

	bio_list_init(&bios);
	if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
		free_prison_cell(cache, mg->cell);

	free_migration(mg);
	defer_bios(cache, &bios);
	wake_migration_worker(cache);

	background_work_end(cache);
}

static void mg_success(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	mg_complete(mg, mg->k.input == 0);
}

static void mg_update_metadata(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;

	switch (op->op) {
	case POLICY_PROMOTE:
		r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
		if (r) {
			DMERR_LIMIT("%s: migration failed; couldn't insert mapping",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_insert_mapping", r);

			mg_complete(mg, false);
			return;
		}
		mg_complete(mg, true);
		break;

	case POLICY_DEMOTE:
		r = dm_cache_remove_mapping(cache->cmd, op->cblock);
		if (r) {
			DMERR_LIMIT("%s: migration failed; couldn't update on disk metadata",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);

			mg_complete(mg, false);
			return;
		}

		/*
		 * It would be nice if we only had to commit when a REQ_FLUSH
		 * comes through.  But there's one scenario that we have to
		 * look out for:
		 *
		 * - vblock x in a cache block
		 * - demotion occurs
		 * - cache block gets reallocated and overwritten
		 * - crash
		 *
		 * When we recover, because there was no commit the cache will
		 * rollback to having the data for vblock x in the cache block.
		 * But the cache block has since been overwritten, so it'll end
		 * up pointing to data that was never in 'x' during the history
		 * recorded on the origin device.
		 *
		 * To avoid this issue we require a commit as part of the
		 * demotion operation.
		 */
		init_continuation(&mg->k, mg_success);
		continue_after_commit(&cache->committer, &mg->k);
		schedule_commit(&cache->committer);
		break;

	case POLICY_WRITEBACK:
		mg_complete(mg, true);
		break;
	}
}

static void mg_update_metadata_after_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	/*
	 * Did the copy succeed?
	 */
	if (mg->k.input)
		mg_complete(mg, false);
	else
		mg_update_metadata(ws);
}

static void mg_upgrade_lock(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);

	/*
	 * Did the copy succeed?
	 */
	if (mg->k.input)
		mg_complete(mg, false);

	else {
		/*
		 * Now we want the lock to prevent both reads and writes.
		 */
		r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
					    READ_WRITE_LOCK_LEVEL);
		if (r < 0)
			mg_complete(mg, false);

		else if (r)
			quiesce(mg, mg_update_metadata);

		else
			mg_update_metadata(ws);
	}
}

static void mg_full_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;
	bool is_policy_promote = (op->op == POLICY_PROMOTE);

	if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
	    is_discarded_oblock(cache, op->oblock)) {
		mg_upgrade_lock(ws);
		return;
	}

	init_continuation(&mg->k, mg_upgrade_lock);
	copy(mg, is_policy_promote);
}

static void mg_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	if (mg->overwrite_bio) {
		/*
		 * No exclusive lock was held when we last checked if the bio
		 * was optimisable.  So we have to check again in case things
		 * have changed (eg, the block may no longer be discarded).
		 */
		if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
			/*
			 * Fall back to a real full copy after doing some tidying up.
			 */
			bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);

			BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
			mg->overwrite_bio = NULL;
			inc_io_migrations(mg->cache);
			mg_full_copy(ws);
			return;
		}

		/*
		 * It's safe to do this here, even though it's new data,
		 * because all IO has been locked out of the block.
		 *
		 * mg_lock_writes() already took READ_WRITE_LOCK_LEVEL
		 * so we're _not_ using mg_upgrade_lock() as the continuation.
		 */
		overwrite(mg, mg_update_metadata_after_copy);
	} else
		mg_full_copy(ws);
}

static int mg_lock_writes(struct dm_cache_migration *mg)
{
	int r;
	struct dm_cell_key_v2 key;
	struct cache *cache = mg->cache;
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);

	/*
	 * Prevent writes to the block, but allow reads to continue.
	 * Unless we're using an overwrite bio, in which case we lock
	 * everything.
	 */
	build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,
			    mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
			    prealloc, &mg->cell);
	if (r < 0) {
		free_prison_cell(cache, prealloc);
		mg_complete(mg, false);
		return r;
	}

	if (mg->cell != prealloc)
		free_prison_cell(cache, prealloc);

	if (r == 0)
		mg_copy(&mg->k.ws);
	else
		quiesce(mg, mg_copy);

	return 0;
}

static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
{
	struct dm_cache_migration *mg;

	if (!background_work_begin(cache)) {
		policy_complete_background_work(cache->policy, op, false);
		return -EPERM;
	}

	mg = alloc_migration(cache);

	mg->op = op;
	mg->overwrite_bio = bio;

	if (!bio)
		inc_io_migrations(cache);

	return mg_lock_writes(mg);
}

/*
 *--------------------------------------------------------------
 * invalidation processing
 *--------------------------------------------------------------
 */

static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{
	struct bio_list bios;
	struct cache *cache = mg->cache;

	bio_list_init(&bios);
	if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
		free_prison_cell(cache, mg->cell);

	if (!success && mg->overwrite_bio)
		bio_io_error(mg->overwrite_bio);

	free_migration(mg);
	defer_bios(cache, &bios);

	background_work_end(cache);
}

static void invalidate_completed(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	invalidate_complete(mg, !mg->k.input);
}

static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
{
	int r;

	r = policy_invalidate_mapping(cache->policy, cblock);
	if (!r) {
		r = dm_cache_remove_mapping(cache->cmd, cblock);
		if (r) {
			DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
				    cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
		}

	} else if (r == -ENODATA) {
		/*
		 * Harmless, already unmapped.
		 */
		r = 0;

	} else
		DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache));

	return r;
}

static void invalidate_remove(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;

	r = invalidate_cblock(cache, mg->invalidate_cblock);
	if (r) {
		invalidate_complete(mg, false);
		return;
	}

	init_continuation(&mg->k, invalidate_completed);
	continue_after_commit(&cache->committer, &mg->k);
	remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
	mg->overwrite_bio = NULL;
	schedule_commit(&cache->committer);
}

static int invalidate_lock(struct dm_cache_migration *mg)
{
	int r;
	struct dm_cell_key_v2 key;
	struct cache *cache = mg->cache;
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);

	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,
			    READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
	if (r < 0) {
		free_prison_cell(cache, prealloc);
		invalidate_complete(mg, false);
		return r;
	}

	if (mg->cell != prealloc)
		free_prison_cell(cache, prealloc);

	if (r)
		quiesce(mg, invalidate_remove);

	else {
		/*
		 * We can't call invalidate_remove() directly here because we
		 * might still be in request context.
		 */
		init_continuation(&mg->k, invalidate_remove);
		queue_work(cache->wq, &mg->k.ws);
	}

	return 0;
}

static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
			    dm_oblock_t oblock, struct bio *bio)
{
	struct dm_cache_migration *mg;

	if (!background_work_begin(cache))
		return -EPERM;

	mg = alloc_migration(cache);

	mg->overwrite_bio = bio;
	mg->invalidate_cblock = cblock;
	mg->invalidate_oblock = oblock;

	return invalidate_lock(mg);
}

/*
 *--------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------
 */

enum busy {
	IDLE,
	BUSY
};

static enum busy spare_migration_bandwidth(struct cache *cache)
{
	bool idle = dm_iot_idle_for(&cache->tracker, HZ);
	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
		cache->sectors_per_block;

	if (idle && current_volume <= cache->migration_threshold)
		return IDLE;
	else
		return BUSY;
}
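
/*
 * For example, with 64-sector blocks and the default migration_threshold
 * of 2048 sectors, up to 31 concurrent migrations pass this check:
 * (31 + 1) * 64 = 2048 <= 2048, while a 32nd in-flight migration would
 * push the estimated volume to 2112 and report BUSY.
 */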
static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_miss : &cache->stats.write_miss);
}

/*----------------------------------------------------------------*/

static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
		   bool *commit_needed)
{
	int r, data_dir;
	bool rb, background_queued;
	dm_cblock_t cblock;

	*commit_needed = false;

	rb = bio_detain_shared(cache, block, bio);
	if (!rb) {
		/*
		 * An exclusive lock is held for this block, so we have to
		 * wait.  We set the commit_needed flag so the current
		 * transaction will be committed asap, allowing this lock
		 * to be dropped.
		 */
		*commit_needed = true;
		return DM_MAPIO_SUBMITTED;
	}

	data_dir = bio_data_dir(bio);

	if (optimisable_bio(cache, bio, block)) {
		struct policy_work *op = NULL;

		r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
		if (unlikely(r && r != -ENOENT)) {
			DMERR_LIMIT("%s: policy_lookup_with_work() failed with r = %d",
				    cache_device_name(cache), r);
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		if (r == -ENOENT && op) {
			bio_drop_shared_lock(cache, bio);
			BUG_ON(op->op != POLICY_PROMOTE);
			mg_start(cache, op, bio);
			return DM_MAPIO_SUBMITTED;
		}
	} else {
		r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
		if (unlikely(r && r != -ENOENT)) {
			DMERR_LIMIT("%s: policy_lookup() failed with r = %d",
				    cache_device_name(cache), r);
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}

		if (background_queued)
			wake_migration_worker(cache);
	}

	if (r == -ENOENT) {
		struct per_bio_data *pb = get_per_bio_data(bio);

		/*
		 * Miss.
		 */
		inc_miss_counter(cache, bio);
		if (pb->req_nr == 0) {
			accounted_begin(cache, bio);
			remap_to_origin_clear_discard(cache, bio, block);
		} else {
			/*
			 * This is a duplicate writethrough io that is no
			 * longer needed because the block has been demoted.
			 */
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}
	} else {
		/*
		 * Hit.
		 */
		inc_hit_counter(cache, bio);

		/*
		 * Passthrough always maps to the origin, invalidating any
		 * cache blocks that are written to.
		 */
		if (passthrough_mode(cache)) {
			if (bio_data_dir(bio) == WRITE) {
				bio_drop_shared_lock(cache, bio);
				atomic_inc(&cache->stats.demotion);
				invalidate_start(cache, cblock, block, bio);
			} else
				remap_to_origin_clear_discard(cache, bio, block);
		} else {
			if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
			    !is_dirty(cache, cblock)) {
				remap_to_origin_and_cache(cache, bio, block, cblock);
				accounted_begin(cache, bio);
			} else
				remap_to_cache_dirty(cache, bio, block, cblock);
		}
	}

	/*
	 * dm core turns FUA requests into a separate payload and FLUSH req.
	 */
	if (bio->bi_opf & REQ_FUA) {
		/*
		 * issue_after_commit will call accounted_begin a second time.  So
		 * we call accounted_complete() to avoid double accounting.
		 */
		accounted_complete(cache, bio);
		issue_after_commit(&cache->committer, bio);
		*commit_needed = true;
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}

static bool process_bio(struct cache *cache, struct bio *bio)
{
	bool commit_needed;

	if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
		dm_submit_bio_remap(bio, NULL);

	return commit_needed;
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 */
static int commit(struct cache *cache, bool clean_shutdown)
{
	int r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	atomic_inc(&cache->stats.commit_count);
	r = dm_cache_commit(cache->cmd, clean_shutdown);
	if (r)
		metadata_operation_failed(cache, "dm_cache_commit", r);

	return r;
}

/*
 * Used by the batcher.
 */
static blk_status_t commit_op(void *context)
{
	struct cache *cache = context;

	if (dm_cache_changed_this_transaction(cache->cmd))
		return errno_to_blk_status(commit(cache, false));

	return 0;
}

/*----------------------------------------------------------------*/

static bool process_flush_bio(struct cache *cache, struct bio *bio)
{
	struct per_bio_data *pb = get_per_bio_data(bio);

	if (!pb->req_nr)
		remap_to_origin(cache, bio);
	else
		remap_to_cache(cache, bio, 0);

	issue_after_commit(&cache->committer, bio);
	return true;
}

static bool process_discard_bio(struct cache *cache, struct bio *bio)
{
	dm_dblock_t b, e;

	/*
	 * FIXME: do we need to lock the region?  Or can we just assume the
	 * user won't be so foolish as to issue discard concurrently with
	 * other IO?
	 */
	calc_discard_block_range(cache, bio, &b, &e);
	while (b != e) {
		set_discard(cache, b);
		b = to_dblock(from_dblock(b) + 1);
	}

	if (cache->features.discard_passdown) {
		remap_to_origin(cache, bio);
		dm_submit_bio_remap(bio, NULL);
	} else
		bio_endio(bio);

	return false;
}

static void process_deferred_bios(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);

	bool commit_needed = false;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irq(&cache->lock);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irq(&cache->lock);

	while ((bio = bio_list_pop(&bios))) {
		if (bio->bi_opf & REQ_PREFLUSH)
			commit_needed = process_flush_bio(cache, bio) || commit_needed;

		else if (bio_op(bio) == REQ_OP_DISCARD)
			commit_needed = process_discard_bio(cache, bio) || commit_needed;

		else
			commit_needed = process_bio(cache, bio) || commit_needed;
	}

	if (commit_needed)
		schedule_commit(&cache->committer);
}

/*
 *--------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------
 */

static void requeue_deferred_bios(struct cache *cache)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);

	while ((bio = bio_list_pop(&bios))) {
		bio->bi_status = BLK_STS_DM_REQUEUE;
		bio_endio(bio);
	}
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);

	policy_tick(cache->policy, true);
	wake_migration_worker(cache);
	schedule_commit(&cache->committer);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}

static void check_migrations(struct work_struct *ws)
{
	int r;
	struct policy_work *op;
	struct cache *cache = container_of(ws, struct cache, migration_worker);
	enum busy b;

	for (;;) {
		b = spare_migration_bandwidth(cache);

		r = policy_get_background_work(cache->policy, b == IDLE, &op);
		if (r == -ENODATA)
			break;

		if (r) {
			DMERR_LIMIT("%s: policy_get_background_work failed",
				    cache_device_name(cache));
			break;
		}

		r = mg_start(cache, op, NULL);
		if (r)
			break;
	}
}

/*
 *--------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------
 */

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
	unsigned int i;

	mempool_exit(&cache->migration_pool);

	if (cache->prison)
		dm_bio_prison_destroy_v2(cache->prison);

	cancel_delayed_work_sync(&cache->waker);
	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	for (i = 0; i < cache->nr_ctr_args; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	bioset_exit(&cache->bs);

	kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return bdev_nr_sectors(dev->bdev);
}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *	 <#feature args> [<feature arg>]*
 *	 <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev	   : fast device holding the persistent metadata
 * cache dev	   : fast device holding cached data blocks
 * origin dev	   : slow device holding original data blocks
 * block size	   : cache unit size in sectors
 *
 * #feature args   : number of feature arguments passed
 * feature args    : writethrough.  (The default is writeback.)
 *
 * policy	   : the replacement policy to use
 * #policy args    : an even number of policy arguments corresponding
 *		     to key/value pairs passed to the policy
 * policy args	   : key/value pairs passed to the policy
 *		     E.g. 'sequential_threshold 1024'
 *		     See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough  : write through caching that prohibits cache block
 *		     content from being different from origin block content.
 *		     Without this argument, the default behaviour is to write
 *		     back cache block contents later for performance reasons,
 *		     so they may differ from the corresponding origin blocks.
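 *
 * A minimal example of a table line following the grammar above (the device
 * paths and the 'smq' policy name are illustrative only):
 *
 *    cache /dev/fast-meta /dev/fast-cache /dev/slow-origin 512 1 writeback smq 0
 */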
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	unsigned int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
		       ca->metadata_dev->bdev, DM_CACHE_METADATA_MAX_SECTORS_WARNING);

	return 0;
}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}

static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long block_size;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (block_size > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = block_size;

	return 0;
}

static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->io_mode = CM_IO_WRITEBACK;
	cf->metadata_version = 1;
	cf->discard_passdown = true;
}

static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{
	static const struct dm_arg _args[] = {
		{0, 3, "Invalid number of cache feature arguments"},
	};

	int r, mode_ctr = 0;
	unsigned int argc;
	const char *arg;
	struct cache_features *cf = &ca->features;

	init_features(cf);

	r = dm_read_arg_group(_args, as, &argc, error);
	if (r)
		return -EINVAL;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "writeback")) {
			cf->io_mode = CM_IO_WRITEBACK;
			mode_ctr++;
		}

		else if (!strcasecmp(arg, "writethrough")) {
			cf->io_mode = CM_IO_WRITETHROUGH;
			mode_ctr++;
		}

		else if (!strcasecmp(arg, "passthrough")) {
			cf->io_mode = CM_IO_PASSTHROUGH;
			mode_ctr++;
		}

		else if (!strcasecmp(arg, "metadata2"))
			cf->metadata_version = 2;

		else if (!strcasecmp(arg, "no_discard_passdown"))
			cf->discard_passdown = false;

		else {
			*error = "Unrecognised cache feature requested";
			return -EINVAL;
		}
	}

	if (mode_ctr > 1) {
		*error = "Duplicate cache io_mode features requested";
		return -EINVAL;
	}

	return 0;
}

static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static const struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}

static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{
	int r;
	struct dm_arg_set as;

	as.argc = argc;
	as.argv = argv;

	r = parse_metadata_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_cache_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_origin_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_block_size(ca, &as, error);
	if (r)
		return r;

	r = parse_features(ca, &as, error);
	if (r)
		return r;

	r = parse_policy(ca, &as, error);

	return r;
}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

#define NOT_CORE_OPTION 1

static int process_config_option(struct cache *cache, const char *key, const char *value)
{
	unsigned long tmp;

	if (!strcasecmp(key, "migration_threshold")) {
		if (kstrtoul(value, 10, &tmp))
			return -EINVAL;

		cache->migration_threshold = tmp;
		return 0;
	}

	return NOT_CORE_OPTION;
}
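
/*
 * migration_threshold can also be changed at runtime through the message
 * interface, e.g. (illustrative device name):
 *
 *	dmsetup message my-cache 0 migration_threshold 4096
 */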
static int set_config_value(struct cache *cache, const char *key, const char *value)
{
	int r = process_config_option(cache, key, value);

	if (r == NOT_CORE_OPTION)
		r = policy_set_config_value(cache->policy, key, value);

	if (r)
		DMWARN("bad config value for %s: %s", key, value);

	return r;
}

static int set_config_values(struct cache *cache, int argc, const char **argv)
{
	int r = 0;

	if (argc & 1) {
		DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
		return -EINVAL;
	}

	while (argc) {
		r = set_config_value(cache, argv[0], argv[1]);
		if (r)
			break;

		argc -= 2;
		argv += 2;
	}

	return r;
}

static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
{
	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
							   cache->cache_size,
							   cache->origin_sectors,
							   cache->sectors_per_block);
	if (IS_ERR(p)) {
		*error = "Error creating cache's policy";
		return PTR_ERR(p);
	}
	cache->policy = p;
	BUG_ON(!cache->policy);

	return 0;
}

/*
 * We want the discard block size to be at least the cache block size and
 * to have no more than 2^14 discard blocks across the origin.
 */
#define MAX_DISCARD_BLOCKS (1 << 14)

static bool too_many_discard_blocks(sector_t discard_block_size,
				    sector_t origin_size)
{
	(void) sector_div(origin_size, discard_block_size);

	return origin_size > MAX_DISCARD_BLOCKS;
}

static sector_t calculate_discard_block_size(sector_t cache_block_size,
					     sector_t origin_size)
{
	sector_t discard_block_size = cache_block_size;

	if (origin_size)
		while (too_many_discard_blocks(discard_block_size, origin_size))
			discard_block_size *= 2;

	return discard_block_size;
}
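
/*
 * Worked example: a 64-sector cache block over a 2^32-sector origin starts
 * at 2^32/64 = 2^26 discard blocks, so the size doubles until it reaches
 * 2^18 sectors, at which point the origin is covered by exactly 2^14
 * discard blocks.
 */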
2360 static void set_cache_size(struct cache *cache, dm_cblock_t size)
2362 dm_block_t nr_blocks = from_cblock(size);
2364 if (nr_blocks > (1 << 20) && cache->cache_size != size)
2365 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2366 "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2367 "Please consider increasing the cache block size to reduce the overall cache block count.",
2368 (unsigned long long) nr_blocks);
2370 cache->cache_size = size;
2373 #define DEFAULT_MIGRATION_THRESHOLD 2048
2375 static int cache_create(struct cache_args *ca, struct cache **result)
2378 char **error = &ca->ti->error;
2379 struct cache *cache;
2380 struct dm_target *ti = ca->ti;
2381 dm_block_t origin_blocks;
2382 struct dm_cache_metadata *cmd;
2383 bool may_format = ca->features.mode == CM_WRITE;
2385 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2390 ti->private = cache;
2391 ti->accounts_remapped_io = true;
2392 ti->num_flush_bios = 2;
2393 ti->flush_supported = true;
2395 ti->num_discard_bios = 1;
2396 ti->discards_supported = true;
2398 ti->per_io_data_size = sizeof(struct per_bio_data);
2400 cache->features = ca->features;
2401 if (writethrough_mode(cache)) {
2402 /* Create bioset for writethrough bios issued to origin */
2403 r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
2408 cache->metadata_dev = ca->metadata_dev;
2409 cache->origin_dev = ca->origin_dev;
2410 cache->cache_dev = ca->cache_dev;
2412 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2414 origin_blocks = cache->origin_sectors = ca->origin_sectors;
2415 origin_blocks = block_div(origin_blocks, ca->block_size);
2416 cache->origin_blocks = to_oblock(origin_blocks);
2418 cache->sectors_per_block = ca->block_size;
2419 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2424 if (ca->block_size & (ca->block_size - 1)) {
2425 dm_block_t cache_size = ca->cache_sectors;
2427 cache->sectors_per_block_shift = -1;
2428 cache_size = block_div(cache_size, ca->block_size);
2429 set_cache_size(cache, to_cblock(cache_size));
2431 cache->sectors_per_block_shift = __ffs(ca->block_size);
2432 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2435 r = create_cache_policy(cache, ca, error);
2439 cache->policy_nr_args = ca->policy_argc;
2440 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2442 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2444 *error = "Error setting cache policy's config values";
2448 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2449 ca->block_size, may_format,
2450 dm_cache_policy_get_hint_size(cache->policy),
2451 ca->features.metadata_version);
2453 *error = "Error creating metadata object";
2458 set_cache_mode(cache, CM_WRITE);
2459 if (get_cache_mode(cache) != CM_WRITE) {
2460 *error = "Unable to get write access to metadata, please check/repair metadata.";
2465 if (passthrough_mode(cache)) {
2468 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2470 *error = "dm_cache_metadata_all_clean() failed";
2475 *error = "Cannot enter passthrough mode unless all blocks are clean";
2480 policy_allow_migrations(cache->policy, false);
2483 spin_lock_init(&cache->lock);
2484 bio_list_init(&cache->deferred_bios);
2485 atomic_set(&cache->nr_allocated_migrations, 0);
2486 atomic_set(&cache->nr_io_migrations, 0);
2487 init_waitqueue_head(&cache->migration_wait);
2490 atomic_set(&cache->nr_dirty, 0);
2491 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2492 if (!cache->dirty_bitset) {
2493 *error = "could not allocate dirty bitset";
2496 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2498 cache->discard_block_size =
2499 calculate_discard_block_size(cache->sectors_per_block,
2500 cache->origin_sectors);
2501 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2502 cache->discard_block_size));
2503 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2504 if (!cache->discard_bitset) {
2505 *error = "could not allocate discard bitset";
2508 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(cache->copier)) {
		*error = "could not create kcopyd client";
		r = PTR_ERR(cache->copier);
		goto bad;
	}

	cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!cache->wq) {
		*error = "could not create workqueue for metadata object";
		goto bad;
	}
	INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
	INIT_WORK(&cache->migration_worker, check_migrations);
	INIT_DELAYED_WORK(&cache->waker, do_waker);

	cache->prison = dm_bio_prison_create_v2(cache->wq);
	if (!cache->prison) {
		*error = "could not create bio prison";
		goto bad;
	}

	r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
				   migration_cache);
	if (r) {
		*error = "Error creating cache's migration mempool";
		goto bad;
	}

	cache->need_tick_bio = true;
	cache->sized = false;
	cache->invalidate = false;
	cache->commit_requested = false;
	cache->loaded_mappings = false;
	cache->loaded_discards = false;

	load_stats(cache);

	atomic_set(&cache->stats.demotion, 0);
	atomic_set(&cache->stats.promotion, 0);
	atomic_set(&cache->stats.copies_avoided, 0);
	atomic_set(&cache->stats.cache_cell_clash, 0);
	atomic_set(&cache->stats.commit_count, 0);
	atomic_set(&cache->stats.discard_count, 0);

	spin_lock_init(&cache->invalidation_lock);
	INIT_LIST_HEAD(&cache->invalidation_requests);

	batcher_init(&cache->committer, commit_op, cache,
		     issue_op, cache, cache->wq);
	dm_iot_init(&cache->tracker);

	init_rwsem(&cache->background_work_lock);
	prevent_background_work(cache);

	*result = cache;
	return 0;
bad:
	destroy(cache);
	return r;
}

static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	unsigned int i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i]) {
			/* Roll back the strings duplicated so far. */
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			return -ENOMEM;
		}
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;

	return 0;
}

static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r = -EINVAL;
	struct cache_args *ca;
	struct cache *cache = NULL;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		ti->error = "Error allocating memory for cache";
		return -ENOMEM;
	}
	ca->ti = ti;

	r = parse_cache_args(ca, argc, argv, &ti->error);
	if (r)
		goto out;

	r = cache_create(ca, &cache);
	if (r)
		goto out;

	/* The first three args (the metadata, cache and origin devices) are consumed above. */
	r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
	if (r) {
		destroy(cache);
		goto out;
	}

	ti->private = cache;
out:
	destroy_cache_args(ca);
	return r;
}

/*----------------------------------------------------------------*/

static int cache_map(struct dm_target *ti, struct bio *bio)
{
	struct cache *cache = ti->private;

	int r;
	bool commit_needed;
	dm_oblock_t block = get_bio_block(cache, bio);

	init_per_bio_data(bio);
	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
		/*
		 * This can only occur if the io goes to a partial block at
		 * the end of the origin device.  We don't cache these.
		 * Just remap to the origin and carry on.
		 */
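		/*
		 * e.g. (made-up geometry): a 1000-sector origin with
		 * 64-sector blocks has origin_blocks == 15, covering
		 * sectors 0-959; a bio at sector 970 computes oblock 15,
		 * fails the bounds check above and goes straight to the
		 * origin.
		 */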
		remap_to_origin(cache, bio);
		accounted_begin(cache, bio);
		return DM_MAPIO_REMAPPED;
	}

	if (discard_or_flush(bio)) {
		defer_bio(cache, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = map_bio(cache, bio, block, &commit_needed);
	if (commit_needed)
		schedule_commit(&cache->committer);

	return r;
}

static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct cache *cache = ti->private;
	unsigned long flags;
	struct per_bio_data *pb = get_per_bio_data(bio);

	if (pb->tick) {
		policy_tick(cache->policy, false);

		spin_lock_irqsave(&cache->lock, flags);
		cache->need_tick_bio = true;
		spin_unlock_irqrestore(&cache->lock, flags);
	}

	bio_drop_shared_lock(cache, bio);
	accounted_complete(cache, bio);

	return DM_ENDIO_DONE;
}

static int write_dirty_bitset(struct cache *cache)
{
	int r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
	if (r)
		metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);

	return r;
}

static int write_discard_bitset(struct cache *cache)
{
	int r;
	unsigned int i;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
					   cache->discard_nr_blocks);
	if (r) {
		DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
		return r;
	}

	for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
		r = dm_cache_set_discard(cache->cmd, to_dblock(i),
					 is_discarded(cache, to_dblock(i)));
		if (r) {
			metadata_operation_failed(cache, "dm_cache_set_discard", r);
			return r;
		}
	}

	return 0;
}

static int write_hints(struct cache *cache)
{
	int r;

	if (get_cache_mode(cache) >= CM_READ_ONLY)
		return -EINVAL;

	r = dm_cache_write_hints(cache->cmd, cache->policy);
	if (r) {
		metadata_operation_failed(cache, "dm_cache_write_hints", r);
		return r;
	}

	return 0;
}

/*
 * Returns true on success.
 */
static bool sync_metadata(struct cache *cache)
{
	int r1, r2, r3, r4;

	r1 = write_dirty_bitset(cache);
	if (r1)
		DMERR("%s: could not write dirty bitset", cache_device_name(cache));

	r2 = write_discard_bitset(cache);
	if (r2)
		DMERR("%s: could not write discard bitset", cache_device_name(cache));

	save_stats(cache);

	r3 = write_hints(cache);
	if (r3)
		DMERR("%s: could not write hints", cache_device_name(cache));

	/*
	 * If writing the above metadata failed, we still commit, but don't
	 * set the clean shutdown flag.  This will effectively force every
	 * dirty bit to be set on reload.
	 */
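	/*
	 * e.g. if only write_hints() failed (r3 != 0), the commit below
	 * still goes to disk, but without the clean-shutdown flag, so the
	 * next activation conservatively reloads every block as dirty.
	 */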
	r4 = commit(cache, !r1 && !r2 && !r3);
	if (r4)
		DMERR("%s: could not write cache metadata", cache_device_name(cache));

	return !r1 && !r2 && !r3 && !r4;
}

static void cache_postsuspend(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	prevent_background_work(cache);
	BUG_ON(atomic_read(&cache->nr_io_migrations));

	cancel_delayed_work_sync(&cache->waker);
	drain_workqueue(cache->wq);
	WARN_ON(cache->tracker.in_flight);

	/*
	 * If it's a flush suspend there won't be any deferred bios, so this
	 * call is harmless.
	 */
	requeue_deferred_bios(cache);

	if (get_cache_mode(cache) == CM_WRITE)
		(void) sync_metadata(cache);
}

static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
			bool dirty, uint32_t hint, bool hint_valid)
{
	struct cache *cache = context;

	if (dirty) {
		set_bit(from_cblock(cblock), cache->dirty_bitset);
		atomic_inc(&cache->nr_dirty);
	} else
		clear_bit(from_cblock(cblock), cache->dirty_bitset);

	return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
}

/*
 * The discard block size in the on disk metadata is not
 * necessarily the same as we're currently using.  So we have to
 * be careful to only set the discarded attribute if we know it
 * covers a complete block of the new size.
 */
struct discard_load_info {
	struct cache *cache;

	/*
	 * These blocks are sized using the on disk dblock size, rather
	 * than the current one.
	 */
	dm_block_t block_size;
	dm_block_t discard_begin, discard_end;
};

static void discard_load_info_init(struct cache *cache,
				   struct discard_load_info *li)
{
	li->cache = cache;
	li->discard_begin = li->discard_end = 0;
}

static void set_discard_range(struct discard_load_info *li)
{
	sector_t b, e;

	if (li->discard_begin == li->discard_end)
		return;

	/*
	 * Convert to sectors.
	 */
	b = li->discard_begin * li->block_size;
	e = li->discard_end * li->block_size;

	/*
	 * Then convert back to the current dblock size.
	 */
	b = dm_sector_div_up(b, li->cache->discard_block_size);
	sector_div(e, li->cache->discard_block_size);

	/*
	 * The origin may have shrunk, so we need to check we're still in
	 * bounds.
	 */
	if (e > from_dblock(li->cache->discard_nr_blocks))
		e = from_dblock(li->cache->discard_nr_blocks);

	for (; b < e; b++)
		set_discard(li->cache, to_dblock(b));
}

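/*
 * Worked example for set_discard_range() above (invented sizes): an
 * on-disk range [2, 5) with a 128-sector dblock spans sectors 256-639.
 * If the current dblock is 256 sectors, b rounds up to dblock 1 and e
 * rounds down to dblock 2, so only dblock 1 (sectors 256-511) is
 * marked; the partially covered tail is deliberately dropped.
 */
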
static int load_discard(void *context, sector_t discard_block_size,
			dm_dblock_t dblock, bool discard)
{
	struct discard_load_info *li = context;

	li->block_size = discard_block_size;

	if (discard) {
		if (from_dblock(dblock) == li->discard_end)
			/*
			 * We're already in a discard range, just extend it.
			 */
			li->discard_end = li->discard_end + 1ULL;
		else {
			/*
			 * Emit the old range and start a new one.
			 */
			set_discard_range(li);
			li->discard_begin = from_dblock(dblock);
			li->discard_end = li->discard_begin + 1ULL;
		}
	} else {
		set_discard_range(li);
		li->discard_begin = li->discard_end = 0;
	}

	return 0;
}

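/*
 * For instance, load_discard() callbacks for discarded dblocks 4, 5
 * and 6 accumulate into the single range [4, 7), which
 * set_discard_range() then translates in one go; a non-discarded
 * dblock flushes and resets the accumulator.
 */
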
static dm_cblock_t get_cache_dev_size(struct cache *cache)
{
	sector_t size = get_dev_size(cache->cache_dev);

	(void) sector_div(size, cache->sectors_per_block);
	return to_cblock(size);
}

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
		if (cache->sized) {
			DMERR("%s: unable to extend cache due to missing cache table reload",
			      cache_device_name(cache));
			return false;
		}
	}

	/*
	 * We can't drop a dirty block when shrinking the cache.  Check
	 * the current block before advancing, so the first and last
	 * candidate blocks are both covered.
	 */
	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
		if (is_dirty(cache, new_size)) {
			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
			      cache_device_name(cache),
			      (unsigned long long) from_cblock(new_size));
			return false;
		}
		new_size = to_cblock(from_cblock(new_size) + 1);
	}

	return true;
}

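/*
 * e.g. shrinking a 1000-cblock cache to 900 via can_resize() only
 * succeeds if cblocks 900-999 are all clean; a single dirty block in
 * that tail aborts the resize, since its data would otherwise be lost.
 */
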
static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("%s: could not resize cache metadata", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_resize", r);
		return r;
	}

	set_cache_size(cache, new_size);

	return 0;
}

static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/*
	 * Check to see if the cache has resized.
	 */
	if (!cache->sized) {
		r = resize_cache_dev(cache, csize);
		if (r)
			return r;

		cache->sized = true;

	} else if (csize != cache->cache_size) {
		if (!can_resize(cache, csize))
			return -EINVAL;

		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("%s: could not load cache mappings", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
			return r;
		}

		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		struct discard_load_info li;

		/*
		 * The discard bitset could have been resized, or the
		 * discard block size changed.  To be safe we start by
		 * setting every dblock to not discarded.
		 */
		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

		discard_load_info_init(cache, &li);
		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
		if (r) {
			DMERR("%s: could not load origin discards", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_discards", r);
			return r;
		}
		set_discard_range(&li);

		cache->loaded_discards = true;
	}

	return r;
}

static void cache_resume(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	cache->need_tick_bio = true;
	allow_background_work(cache);
	do_waker(&cache->waker.work);
}

static void emit_flags(struct cache *cache, char *result,
		       unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	struct cache_features *cf = &cache->features;
	unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;

	DMEMIT("%u ", count);

	if (cf->metadata_version == 2)
		DMEMIT("metadata2 ");

	if (writethrough_mode(cache))
		DMEMIT("writethrough ");

	else if (passthrough_mode(cache))
		DMEMIT("passthrough ");

	else if (writeback_mode(cache))
		DMEMIT("writeback ");

	else {
		DMEMIT("unknown ");
		DMERR("%s: internal error: unknown io mode: %d",
		      cache_device_name(cache), (int) cf->io_mode);
	}

	if (!cf->discard_passdown)
		DMEMIT("no_discard_passdown ");

	*sz_ptr = sz;
}

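/*
 * Example (hypothetical feature set): metadata2 + writeback +
 * no_discard_passdown gives count == 3, emitted as
 * "3 metadata2 writeback no_discard_passdown ".
 */
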
/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty>
 * <#features> <features>*
 * <#core args> <core args>
 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
 */
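/*
 * A line built from made-up numbers, purely to show the field order:
 *
 *   8 72/1024 512 321/4096 543 2178 123 88 10 15 0
 *   1 writethrough 2 migration_threshold 2048 smq 0 rw -
 */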
static void cache_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{
	int r = 0;
	unsigned int i;
	ssize_t sz = 0;
	dm_block_t nr_free_blocks_metadata = 0;
	dm_block_t nr_blocks_metadata = 0;
	char buf[BDEVNAME_SIZE];
	struct cache *cache = ti->private;
	dm_cblock_t residency;
	bool needs_check;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_cache_mode(cache) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit(cache, false);

		r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
		if (r) {
			DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
			      cache_device_name(cache), r);
			goto err;
		}

		residency = policy_residency(cache->policy);

		DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
		       (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
		       (unsigned long long)nr_blocks_metadata,
		       (unsigned long long)cache->sectors_per_block,
		       (unsigned long long) from_cblock(residency),
		       (unsigned long long) from_cblock(cache->cache_size),
		       (unsigned int) atomic_read(&cache->stats.read_hit),
		       (unsigned int) atomic_read(&cache->stats.read_miss),
		       (unsigned int) atomic_read(&cache->stats.write_hit),
		       (unsigned int) atomic_read(&cache->stats.write_miss),
		       (unsigned int) atomic_read(&cache->stats.demotion),
		       (unsigned int) atomic_read(&cache->stats.promotion),
		       (unsigned long) atomic_read(&cache->nr_dirty));

		emit_flags(cache, result, maxlen, &sz);

		DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);

		DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
		if (sz < maxlen) {
			r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
			if (r)
				DMERR("%s: policy_emit_config_values returned %d",
				      cache_device_name(cache), r);
		}

		if (get_cache_mode(cache) == CM_READ_ONLY)
			DMEMIT("ro ");
		else
			DMEMIT("rw ");

		r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);

		if (r || needs_check)
			DMEMIT("needs_check ");
		else
			DMEMIT("- ");

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < cache->nr_ctr_args - 1; i++)
			DMEMIT(" %s", cache->ctr_args[i]);
		if (cache->nr_ctr_args)
			DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		if (get_cache_mode(cache) == CM_FAIL)
			DMEMIT(",metadata_mode=fail");
		else if (get_cache_mode(cache) == CM_READ_ONLY)
			DMEMIT(",metadata_mode=ro");
		else
			DMEMIT(",metadata_mode=rw");

		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
		DMEMIT(",cache_metadata_device=%s", buf);
		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
		DMEMIT(",cache_device=%s", buf);
		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
		DMEMIT(",cache_origin_device=%s", buf);
		DMEMIT(",writethrough=%c", writethrough_mode(cache) ? 'y' : 'n');
		DMEMIT(",writeback=%c", writeback_mode(cache) ? 'y' : 'n');
		DMEMIT(",passthrough=%c", passthrough_mode(cache) ? 'y' : 'n');
		DMEMIT(",metadata2=%c", cache->features.metadata_version == 2 ? 'y' : 'n');
		DMEMIT(",no_discard_passdown=%c", cache->features.discard_passdown ? 'n' : 'y');
		DMEMIT(";");
		break;
	}

	return;

err:
	DMEMIT("Error");
}

/*
 * Defines a range of cblocks: begin to (end - 1) are in the range; end
 * is the one-past-the-end value.
 */
struct cblock_range {
	dm_cblock_t begin;
	dm_cblock_t end;
};
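
/*
 * e.g. a range with begin == 3 and end == 7 covers cblocks 3, 4, 5
 * and 6; end itself is excluded.
 */
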
/*
 * A cache block range can take two forms:
 *
 * i) A single cblock, e.g. '3456'
 * ii) A begin and end cblock with a dash between, e.g. 123-234
 */
static int parse_cblock_range(struct cache *cache, const char *str,
			      struct cblock_range *result)
{
	char dummy;
	uint64_t b, e;
	int r;

	/*
	 * Try to parse form (ii) first.
	 */
	r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
	if (r < 0)
		return r;

	if (r == 2) {
		result->begin = to_cblock(b);
		result->end = to_cblock(e);
		return 0;
	}

	/*
	 * That didn't work, try form (i).
	 */
	r = sscanf(str, "%llu%c", &b, &dummy);
	if (r < 0)
		return r;

	if (r == 1) {
		result->begin = to_cblock(b);
		result->end = to_cblock(from_cblock(result->begin) + 1u);
		return 0;
	}

	DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
	return -EINVAL;
}

static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{
	uint64_t b = from_cblock(range->begin);
	uint64_t e = from_cblock(range->end);
	uint64_t n = from_cblock(cache->cache_size);

	if (b >= n) {
		DMERR("%s: begin cblock out of range: %llu >= %llu",
		      cache_device_name(cache), b, n);
		return -EINVAL;
	}

	if (e > n) {
		DMERR("%s: end cblock out of range: %llu > %llu",
		      cache_device_name(cache), e, n);
		return -EINVAL;
	}

	if (b >= e) {
		DMERR("%s: invalid cblock range: %llu >= %llu",
		      cache_device_name(cache), b, e);
		return -EINVAL;
	}

	return 0;
}

static inline dm_cblock_t cblock_succ(dm_cblock_t b)
{
	return to_cblock(from_cblock(b) + 1);
}

static int request_invalidation(struct cache *cache, struct cblock_range *range)
{
	int r = 0;

	/*
	 * We don't need to do any locking here because we know we're in
	 * passthrough mode.  There is potential for a race between an
	 * invalidation triggered by an io and an invalidation message.
	 * This is harmless; we needn't worry if the policy call fails.
	 */
	while (range->begin != range->end) {
		r = invalidate_cblock(cache, range->begin);
		if (r)
			return r;

		range->begin = cblock_succ(range->begin);
	}

	cache->commit_requested = true;
	return r;
}

static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
					      const char **cblock_ranges)
{
	int r = 0;
	unsigned int i;
	struct cblock_range range;

	if (!passthrough_mode(cache)) {
		DMERR("%s: cache has to be in passthrough mode for invalidation",
		      cache_device_name(cache));
		return -EPERM;
	}

	for (i = 0; i < count; i++) {
		r = parse_cblock_range(cache, cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		/*
		 * Pass the begin and end cblocks to the worker and wake it.
		 */
		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}

/*
 * Supports
 *	"<key> <value>"
 * and
 *	"invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
 *
 * The key migration_threshold is supported by the cache target core.
 */
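/*
 * Typical invocations from userspace (device name invented for the
 * example):
 *
 *   dmsetup message my-cache 0 invalidate_cblocks 2345 3400-4500
 *   dmsetup message my-cache 0 migration_threshold 204800
 */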
static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (get_cache_mode(cache) >= CM_READ_ONLY) {
		DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
		      cache_device_name(cache));
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r = 0;
	struct cache *cache = ti->private;

	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}

/*
 * If discard_passdown was enabled, verify that the origin device
 * supports discards; disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct cache *cache)
{
	struct block_device *origin_bdev = cache->origin_dev->bdev;
	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
	const char *reason = NULL;

	if (!cache->features.discard_passdown)
		return;

	if (!bdev_max_discard_sectors(origin_bdev))
		reason = "discard unsupported";

	else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
		reason = "max discard sectors smaller than a block";

	if (reason) {
		DMWARN("Origin device (%pg) %s: Disabling discard passdown.",
		       origin_bdev, reason);
		cache->features.discard_passdown = false;
	}
}

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	struct block_device *origin_bdev = cache->origin_dev->bdev;
	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;

	if (!cache->features.discard_passdown) {
		/* No passdown is done, so set our own virtual limits. */
		limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
						    cache->origin_sectors);
		limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
		return;
	}

	/*
	 * cache_iterate_devices() stacks both the origin and fast device
	 * limits, but discards aren't passed to the fast device, so
	 * inherit the origin's limits.
	 */
	limits->max_discard_sectors = origin_limits->max_discard_sectors;
	limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
	limits->discard_granularity = origin_limits->discard_granularity;
	limits->discard_alignment = origin_limits->discard_alignment;
	limits->discard_misaligned = origin_limits->discard_misaligned;
}

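/*
 * With the virtual limits in set_discard_limits() and an (invented)
 * 2048-sector discard block: max_discard_sectors becomes
 * min(2048 * 1024, origin size) and discard_granularity becomes
 * 2048 << 9 = 1 MiB.
 */
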
static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with the
	 * cache's blocksize (io_opt is a factor), do not override them.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}

	disable_passdown_if_not_supported(cache);
	set_discard_limits(cache, limits);
}

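/*
 * e.g. with 512-sector cache blocks and a stacked io_opt of 1280
 * sectors: 1280 % 512 != 0, so io_min and io_opt are both overridden
 * to 512 << 9 = 256 KiB, keeping optimal IO aligned to whole cache
 * blocks.
 */
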
/*----------------------------------------------------------------*/

static struct target_type cache_target = {
	.name = "cache",
	.version = {2, 2, 0},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
	.map = cache_map,
	.end_io = cache_end_io,
	.postsuspend = cache_postsuspend,
	.preresume = cache_preresume,
	.resume = cache_resume,
	.status = cache_status,
	.message = cache_message,
	.iterate_devices = cache_iterate_devices,
	.io_hints = cache_io_hints,
};

static int __init dm_cache_init(void)
{
	int r;

	migration_cache = KMEM_CACHE(dm_cache_migration, 0);
	if (!migration_cache)
		return -ENOMEM;

	r = dm_register_target(&cache_target);
	if (r) {
		kmem_cache_destroy(migration_cache);
		return r;
	}

	return 0;
}

static void __exit dm_cache_exit(void)
{
	dm_unregister_target(&cache_target);
	kmem_cache_destroy(migration_cache);
}

module_init(dm_cache_init);
module_exit(dm_cache_exit);

MODULE_DESCRIPTION(DM_NAME " cache target");
MODULE_LICENSE("GPL");