linux.git: drivers/md/dm-cache-metadata.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 Red Hat, Inc.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include "dm-cache-metadata.h"
9
10 #include "persistent-data/dm-array.h"
11 #include "persistent-data/dm-bitset.h"
12 #include "persistent-data/dm-space-map.h"
13 #include "persistent-data/dm-space-map-disk.h"
14 #include "persistent-data/dm-transaction-manager.h"
15
16 #include <linux/device-mapper.h>
17 #include <linux/refcount.h>
18
19 /*----------------------------------------------------------------*/
20
21 #define DM_MSG_PREFIX   "cache metadata"
22
23 #define CACHE_SUPERBLOCK_MAGIC 06142003
24 #define CACHE_SUPERBLOCK_LOCATION 0
25
26 /*
27  * Defines the range of metadata versions that this module can handle.
28  */
29 #define MIN_CACHE_VERSION 1
30 #define MAX_CACHE_VERSION 2
31
32 /*
33  *  3 for btree insert +
34  *  2 for btree lookup used within space map
35  */
36 #define CACHE_MAX_CONCURRENT_LOCKS 5
37 #define SPACE_MAP_ROOT_SIZE 128
38
39 enum superblock_flag_bits {
40         /* for spotting crashes that would invalidate the dirty bitset */
41         CLEAN_SHUTDOWN,
42         /* metadata must be checked using the tools */
43         NEEDS_CHECK,
44 };
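
/*
 * CLEAN_SHUTDOWN is cleared in the superblock as soon as the metadata is
 * opened and is only set again by a commit that requests a clean shutdown.
 * If it is found clear on open, the previous instance crashed: the on-disk
 * dirty bits, hints and discards can't be trusted, so valid mappings are
 * loaded as dirty and the hint/discard data is ignored.
 */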
45
46 /*
47  * Each mapping from cache block -> origin block carries a set of flags.
48  */
49 enum mapping_bits {
50         /*
51          * A valid mapping.  Because we're using an array we clear this
52          * flag for a non-existent mapping.
53          */
54         M_VALID = 1,
55
56         /*
57          * The data on the cache is different from that on the origin.
58          * This flag is only used by metadata format 1.
59          */
60         M_DIRTY = 2
61 };
62
63 struct cache_disk_superblock {
64         __le32 csum;
65         __le32 flags;
66         __le64 blocknr;
67
68         __u8 uuid[16];
69         __le64 magic;
70         __le32 version;
71
72         __u8 policy_name[CACHE_POLICY_NAME_SIZE];
73         __le32 policy_hint_size;
74
75         __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
76         __le64 mapping_root;
77         __le64 hint_root;
78
79         __le64 discard_root;
80         __le64 discard_block_size;
81         __le64 discard_nr_blocks;
82
83         __le32 data_block_size;
84         __le32 metadata_block_size;
85         __le32 cache_blocks;
86
87         __le32 compat_flags;
88         __le32 compat_ro_flags;
89         __le32 incompat_flags;
90
91         __le32 read_hits;
92         __le32 read_misses;
93         __le32 write_hits;
94         __le32 write_misses;
95
96         __le32 policy_version[CACHE_POLICY_VERSION_SIZE];
97
98         /*
99          * Metadata format 2 fields.
100          */
101         __le64 dirty_root;
102 } __packed;
103
104 struct dm_cache_metadata {
105         refcount_t ref_count;
106         struct list_head list;
107
108         unsigned int version;
109         struct block_device *bdev;
110         struct dm_block_manager *bm;
111         struct dm_space_map *metadata_sm;
112         struct dm_transaction_manager *tm;
113
114         struct dm_array_info info;
115         struct dm_array_info hint_info;
116         struct dm_disk_bitset discard_info;
117
118         struct rw_semaphore root_lock;
119         unsigned long flags;
120         dm_block_t root;
121         dm_block_t hint_root;
122         dm_block_t discard_root;
123
124         sector_t discard_block_size;
125         dm_dblock_t discard_nr_blocks;
126
127         sector_t data_block_size;
128         dm_cblock_t cache_blocks;
129         bool changed:1;
130         bool clean_when_opened:1;
131
132         char policy_name[CACHE_POLICY_NAME_SIZE];
133         unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
134         size_t policy_hint_size;
135         struct dm_cache_statistics stats;
136
137         /*
138          * Reading the space map root can fail, so we read it into this
139          * buffer before the superblock is locked and updated.
140          */
141         __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
142
143         /*
144          * Set if a transaction has to be aborted but the attempt to roll
145          * back to the previous (good) transaction failed.  The only
146          * metadata operation permissible in this state is the closing of
147          * the device.
148          */
149         bool fail_io:1;
150
151         /*
152          * Metadata format 2 fields.
153          */
154         dm_block_t dirty_root;
155         struct dm_disk_bitset dirty_info;
156
157         /*
158          * These structures are used when loading metadata.  They're too
159          * big to put on the stack.
160          */
161         struct dm_array_cursor mapping_cursor;
162         struct dm_array_cursor hint_cursor;
163         struct dm_bitset_cursor dirty_cursor;
164 };
165
166 /*
167  *-----------------------------------------------------------------
168  * superblock validator
169  *-----------------------------------------------------------------
170  */
171 #define SUPERBLOCK_CSUM_XOR 9031977
172
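/*
 * The checksum covers the whole metadata block apart from the csum field
 * itself, which is why the length handed to dm_bm_checksum() is
 * sb_block_size minus sizeof(__le32).
 */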
173 static void sb_prepare_for_write(const struct dm_block_validator *v,
174                                  struct dm_block *b,
175                                  size_t sb_block_size)
176 {
177         struct cache_disk_superblock *disk_super = dm_block_data(b);
178
179         disk_super->blocknr = cpu_to_le64(dm_block_location(b));
180         disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
181                                                       sb_block_size - sizeof(__le32),
182                                                       SUPERBLOCK_CSUM_XOR));
183 }
184
185 static int check_metadata_version(struct cache_disk_superblock *disk_super)
186 {
187         uint32_t metadata_version = le32_to_cpu(disk_super->version);
188
189         if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
190                 DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
191                       metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
192                 return -EINVAL;
193         }
194
195         return 0;
196 }
197
198 static int sb_check(const struct dm_block_validator *v,
199                     struct dm_block *b,
200                     size_t sb_block_size)
201 {
202         struct cache_disk_superblock *disk_super = dm_block_data(b);
203         __le32 csum_le;
204
205         if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
206                 DMERR("%s failed: blocknr %llu: wanted %llu",
207                       __func__, le64_to_cpu(disk_super->blocknr),
208                       (unsigned long long)dm_block_location(b));
209                 return -ENOTBLK;
210         }
211
212         if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
213                 DMERR("%s failed: magic %llu: wanted %llu",
214                       __func__, le64_to_cpu(disk_super->magic),
215                       (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
216                 return -EILSEQ;
217         }
218
219         csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
220                                              sb_block_size - sizeof(__le32),
221                                              SUPERBLOCK_CSUM_XOR));
222         if (csum_le != disk_super->csum) {
223                 DMERR("%s failed: csum %u: wanted %u",
224                       __func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
225                 return -EILSEQ;
226         }
227
228         return check_metadata_version(disk_super);
229 }
230
231 static const struct dm_block_validator sb_validator = {
232         .name = "superblock",
233         .prepare_for_write = sb_prepare_for_write,
234         .check = sb_check
235 };
236
237 /*----------------------------------------------------------------*/
238
239 static int superblock_read_lock(struct dm_cache_metadata *cmd,
240                                 struct dm_block **sblock)
241 {
242         return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
243                                &sb_validator, sblock);
244 }
245
246 static int superblock_lock_zero(struct dm_cache_metadata *cmd,
247                                 struct dm_block **sblock)
248 {
249         return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
250                                      &sb_validator, sblock);
251 }
252
253 static int superblock_lock(struct dm_cache_metadata *cmd,
254                            struct dm_block **sblock)
255 {
256         return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
257                                 &sb_validator, sblock);
258 }
259
260 /*----------------------------------------------------------------*/
261
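/*
 * Used by __open_or_format_metadata() to decide whether the device has ever
 * been formatted: an all-zero superblock is taken to mean "unformatted".
 */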
262 static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
263 {
264         int r;
265         unsigned int i;
266         struct dm_block *b;
267         __le64 *data_le, zero = cpu_to_le64(0);
268         unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
269
270         /*
271          * We can't use a validator here - it may be all zeroes.
272          */
273         r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
274         if (r)
275                 return r;
276
277         data_le = dm_block_data(b);
278         *result = true;
279         for (i = 0; i < sb_block_size; i++) {
280                 if (data_le[i] != zero) {
281                         *result = false;
282                         break;
283                 }
284         }
285
286         dm_bm_unlock(b);
287
288         return 0;
289 }
290
291 static void __setup_mapping_info(struct dm_cache_metadata *cmd)
292 {
293         struct dm_btree_value_type vt;
294
295         vt.context = NULL;
296         vt.size = sizeof(__le64);
297         vt.inc = NULL;
298         vt.dec = NULL;
299         vt.equal = NULL;
300         dm_array_info_init(&cmd->info, cmd->tm, &vt);
301
302         if (cmd->policy_hint_size) {
303                 vt.size = sizeof(__le32);
304                 dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
305         }
306 }
307
308 static int __save_sm_root(struct dm_cache_metadata *cmd)
309 {
310         int r;
311         size_t metadata_len;
312
313         r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
314         if (r < 0)
315                 return r;
316
317         return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
318                                metadata_len);
319 }
320
321 static void __copy_sm_root(struct dm_cache_metadata *cmd,
322                            struct cache_disk_superblock *disk_super)
323 {
324         memcpy(&disk_super->metadata_space_map_root,
325                &cmd->metadata_space_map_root,
326                sizeof(cmd->metadata_space_map_root));
327 }
328
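/*
 * Metadata format 1 records the dirty state in the M_DIRTY flag of each
 * packed mapping.  Format 2 keeps it in a separate on-disk bitset rooted at
 * dirty_root, which allows the whole dirty state to be rewritten in a single
 * pass (see __set_dirty_bits_v2()).
 */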
329 static bool separate_dirty_bits(struct dm_cache_metadata *cmd)
330 {
331         return cmd->version >= 2;
332 }
333
334 static int __write_initial_superblock(struct dm_cache_metadata *cmd)
335 {
336         int r;
337         struct dm_block *sblock;
338         struct cache_disk_superblock *disk_super;
339         sector_t bdev_size = bdev_nr_sectors(cmd->bdev);
340
341         /* FIXME: see if we can lose the max sectors limit */
342         if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
343                 bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
344
345         r = dm_tm_pre_commit(cmd->tm);
346         if (r < 0)
347                 return r;
348
349         /*
350          * dm_sm_copy_root() can fail.  So we need to do it before we start
351          * updating the superblock.
352          */
353         r = __save_sm_root(cmd);
354         if (r)
355                 return r;
356
357         r = superblock_lock_zero(cmd, &sblock);
358         if (r)
359                 return r;
360
361         disk_super = dm_block_data(sblock);
362         disk_super->flags = 0;
363         memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
364         disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
365         disk_super->version = cpu_to_le32(cmd->version);
366         memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
367         memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
368         disk_super->policy_hint_size = cpu_to_le32(0);
369
370         __copy_sm_root(cmd, disk_super);
371
372         disk_super->mapping_root = cpu_to_le64(cmd->root);
373         disk_super->hint_root = cpu_to_le64(cmd->hint_root);
374         disk_super->discard_root = cpu_to_le64(cmd->discard_root);
375         disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
376         disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
377         disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
378         disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
379         disk_super->cache_blocks = cpu_to_le32(0);
380
381         disk_super->read_hits = cpu_to_le32(0);
382         disk_super->read_misses = cpu_to_le32(0);
383         disk_super->write_hits = cpu_to_le32(0);
384         disk_super->write_misses = cpu_to_le32(0);
385
386         if (separate_dirty_bits(cmd))
387                 disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
388
389         return dm_tm_commit(cmd->tm, sblock);
390 }
391
392 static int __format_metadata(struct dm_cache_metadata *cmd)
393 {
394         int r;
395
396         r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
397                                  &cmd->tm, &cmd->metadata_sm);
398         if (r < 0) {
399                 DMERR("tm_create_with_sm failed");
400                 return r;
401         }
402
403         __setup_mapping_info(cmd);
404
405         r = dm_array_empty(&cmd->info, &cmd->root);
406         if (r < 0)
407                 goto bad;
408
409         if (separate_dirty_bits(cmd)) {
410                 dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
411                 r = dm_bitset_empty(&cmd->dirty_info, &cmd->dirty_root);
412                 if (r < 0)
413                         goto bad;
414         }
415
416         dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
417         r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
418         if (r < 0)
419                 goto bad;
420
421         cmd->discard_block_size = 0;
422         cmd->discard_nr_blocks = 0;
423
424         r = __write_initial_superblock(cmd);
425         if (r)
426                 goto bad;
427
428         cmd->clean_when_opened = true;
429         return 0;
430
431 bad:
432         dm_tm_destroy(cmd->tm);
433         dm_sm_destroy(cmd->metadata_sm);
434
435         return r;
436 }
437
438 static int __check_incompat_features(struct cache_disk_superblock *disk_super,
439                                      struct dm_cache_metadata *cmd)
440 {
441         uint32_t incompat_flags, features;
442
443         incompat_flags = le32_to_cpu(disk_super->incompat_flags);
444         features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
445         if (features) {
446                 DMERR("could not access metadata due to unsupported optional features (%lx).",
447                       (unsigned long)features);
448                 return -EINVAL;
449         }
450
451         /*
452          * Check for read-only metadata to skip the following RDWR checks.
453          */
454         if (bdev_read_only(cmd->bdev))
455                 return 0;
456
457         features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
458         if (features) {
459                 DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
460                       (unsigned long)features);
461                 return -EINVAL;
462         }
463
464         return 0;
465 }
466
467 static int __open_metadata(struct dm_cache_metadata *cmd)
468 {
469         int r;
470         struct dm_block *sblock;
471         struct cache_disk_superblock *disk_super;
472         unsigned long sb_flags;
473
474         r = superblock_read_lock(cmd, &sblock);
475         if (r < 0) {
476                 DMERR("couldn't read lock superblock");
477                 return r;
478         }
479
480         disk_super = dm_block_data(sblock);
481
482         /* Verify the data block size hasn't changed */
483         if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
484                 DMERR("changing the data block size (from %u to %llu) is not supported",
485                       le32_to_cpu(disk_super->data_block_size),
486                       (unsigned long long)cmd->data_block_size);
487                 r = -EINVAL;
488                 goto bad;
489         }
490
491         r = __check_incompat_features(disk_super, cmd);
492         if (r < 0)
493                 goto bad;
494
495         r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
496                                disk_super->metadata_space_map_root,
497                                sizeof(disk_super->metadata_space_map_root),
498                                &cmd->tm, &cmd->metadata_sm);
499         if (r < 0) {
500                 DMERR("tm_open_with_sm failed");
501                 goto bad;
502         }
503
504         __setup_mapping_info(cmd);
505         dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
506         dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
507         sb_flags = le32_to_cpu(disk_super->flags);
508         cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
509         dm_bm_unlock(sblock);
510
511         return 0;
512
513 bad:
514         dm_bm_unlock(sblock);
515         return r;
516 }
517
518 static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
519                                      bool format_device)
520 {
521         int r;
522         bool unformatted = false;
523
524         r = __superblock_all_zeroes(cmd->bm, &unformatted);
525         if (r)
526                 return r;
527
528         if (unformatted)
529                 return format_device ? __format_metadata(cmd) : -EPERM;
530
531         return __open_metadata(cmd);
532 }
533
534 static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
535                                             bool may_format_device)
536 {
537         int r;
538
539         cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
540                                           CACHE_MAX_CONCURRENT_LOCKS);
541         if (IS_ERR(cmd->bm)) {
542                 DMERR("could not create block manager");
543                 r = PTR_ERR(cmd->bm);
544                 cmd->bm = NULL;
545                 return r;
546         }
547
548         r = __open_or_format_metadata(cmd, may_format_device);
549         if (r) {
550                 dm_block_manager_destroy(cmd->bm);
551                 cmd->bm = NULL;
552         }
553
554         return r;
555 }
556
557 static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd,
558                                               bool destroy_bm)
559 {
560         dm_sm_destroy(cmd->metadata_sm);
561         dm_tm_destroy(cmd->tm);
562         if (destroy_bm)
563                 dm_block_manager_destroy(cmd->bm);
564 }
565
566 typedef unsigned long (*flags_mutator)(unsigned long);
567
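/*
 * A flags_mutator transforms the superblock flags word while the superblock
 * is locked; set_clean_shutdown() and clear_clean_shutdown() below are the
 * only mutators used.
 */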
568 static void update_flags(struct cache_disk_superblock *disk_super,
569                          flags_mutator mutator)
570 {
571         uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
572
573         disk_super->flags = cpu_to_le32(sb_flags);
574 }
575
576 static unsigned long set_clean_shutdown(unsigned long flags)
577 {
578         set_bit(CLEAN_SHUTDOWN, &flags);
579         return flags;
580 }
581
582 static unsigned long clear_clean_shutdown(unsigned long flags)
583 {
584         clear_bit(CLEAN_SHUTDOWN, &flags);
585         return flags;
586 }
587
588 static void read_superblock_fields(struct dm_cache_metadata *cmd,
589                                    struct cache_disk_superblock *disk_super)
590 {
591         cmd->version = le32_to_cpu(disk_super->version);
592         cmd->flags = le32_to_cpu(disk_super->flags);
593         cmd->root = le64_to_cpu(disk_super->mapping_root);
594         cmd->hint_root = le64_to_cpu(disk_super->hint_root);
595         cmd->discard_root = le64_to_cpu(disk_super->discard_root);
596         cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
597         cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
598         cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
599         cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
600         strscpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
601         cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
602         cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
603         cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
604         cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
605
606         cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
607         cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
608         cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
609         cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
610
611         if (separate_dirty_bits(cmd))
612                 cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);
613
614         cmd->changed = false;
615 }
616
617 /*
618  * The mutator updates the superblock flags.
619  */
620 static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
621                                      flags_mutator mutator)
622 {
623         int r;
624         struct cache_disk_superblock *disk_super;
625         struct dm_block *sblock;
626
627         r = superblock_lock(cmd, &sblock);
628         if (r)
629                 return r;
630
631         disk_super = dm_block_data(sblock);
632         update_flags(disk_super, mutator);
633         read_superblock_fields(cmd, disk_super);
634         dm_bm_unlock(sblock);
635
636         return dm_bm_flush(cmd->bm);
637 }
638
639 static int __begin_transaction(struct dm_cache_metadata *cmd)
640 {
641         int r;
642         struct cache_disk_superblock *disk_super;
643         struct dm_block *sblock;
644
645         /*
646          * We re-read the superblock every time, though we shouldn't really
647          * need to.
648          */
649         r = superblock_read_lock(cmd, &sblock);
650         if (r)
651                 return r;
652
653         disk_super = dm_block_data(sblock);
654         read_superblock_fields(cmd, disk_super);
655         dm_bm_unlock(sblock);
656
657         return 0;
658 }
659
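/*
 * A commit happens in a fixed order: flush the dirty and discard bitsets,
 * pre-commit the transaction manager, copy the space map root into a buffer
 * (this can fail, so it's done before touching the superblock), then
 * write-lock the superblock, update the roots and stats, and finally
 * dm_tm_commit(), which writes the superblock out and unlocks it.
 */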
660 static int __commit_transaction(struct dm_cache_metadata *cmd,
661                                 flags_mutator mutator)
662 {
663         int r;
664         struct cache_disk_superblock *disk_super;
665         struct dm_block *sblock;
666
667         /*
668          * The on-disk superblock must never exceed a single 512-byte sector.
669          */
670         BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
671
672         if (separate_dirty_bits(cmd)) {
673                 r = dm_bitset_flush(&cmd->dirty_info, cmd->dirty_root,
674                                     &cmd->dirty_root);
675                 if (r)
676                         return r;
677         }
678
679         r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
680                             &cmd->discard_root);
681         if (r)
682                 return r;
683
684         r = dm_tm_pre_commit(cmd->tm);
685         if (r < 0)
686                 return r;
687
688         r = __save_sm_root(cmd);
689         if (r)
690                 return r;
691
692         r = superblock_lock(cmd, &sblock);
693         if (r)
694                 return r;
695
696         disk_super = dm_block_data(sblock);
697
698         disk_super->flags = cpu_to_le32(cmd->flags);
699         if (mutator)
700                 update_flags(disk_super, mutator);
701
702         disk_super->mapping_root = cpu_to_le64(cmd->root);
703         if (separate_dirty_bits(cmd))
704                 disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
705         disk_super->hint_root = cpu_to_le64(cmd->hint_root);
706         disk_super->discard_root = cpu_to_le64(cmd->discard_root);
707         disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
708         disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
709         disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
710         strscpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
711         disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
712         disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
713         disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
714         disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
715
716         disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
717         disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
718         disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
719         disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
720         __copy_sm_root(cmd, disk_super);
721
722         return dm_tm_commit(cmd->tm, sblock);
723 }
724
725 /*----------------------------------------------------------------*/
726
727 /*
728  * The mappings are held in a dm-array that has 64-bit values stored in
729  * little-endian format.  The index is the cblock, the high 48 bits of the
730  * value are the oblock and the low 16 bits the flags.
731  */
732 #define FLAGS_MASK ((1 << 16) - 1)
733
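/*
 * For example, a valid, dirty (format 1) mapping to origin block 5 packs as
 *
 *     (5 << 16) | M_VALID | M_DIRTY == 0x50003
 *
 * and is stored on disk as a little-endian __le64.
 */
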
734 static __le64 pack_value(dm_oblock_t block, unsigned int flags)
735 {
736         uint64_t value = from_oblock(block);
737
738         value <<= 16;
739         value = value | (flags & FLAGS_MASK);
740         return cpu_to_le64(value);
741 }
742
743 static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
744 {
745         uint64_t value = le64_to_cpu(value_le);
746         uint64_t b = value >> 16;
747
748         *block = to_oblock(b);
749         *flags = value & FLAGS_MASK;
750 }
751
752 /*----------------------------------------------------------------*/
753
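/*
 * Allocates the in-core metadata object, creates or opens the persistent
 * data structures, and starts the first transaction with clear_clean_shutdown
 * so that a subsequent crash is detectable on the next open.
 */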
754 static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
755                                                sector_t data_block_size,
756                                                bool may_format_device,
757                                                size_t policy_hint_size,
758                                                unsigned int metadata_version)
759 {
760         int r;
761         struct dm_cache_metadata *cmd;
762
763         cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
764         if (!cmd) {
765                 DMERR("could not allocate metadata struct");
766                 return ERR_PTR(-ENOMEM);
767         }
768
769         cmd->version = metadata_version;
770         refcount_set(&cmd->ref_count, 1);
771         init_rwsem(&cmd->root_lock);
772         cmd->bdev = bdev;
773         cmd->data_block_size = data_block_size;
774         cmd->cache_blocks = 0;
775         cmd->policy_hint_size = policy_hint_size;
776         cmd->changed = true;
777         cmd->fail_io = false;
778
779         r = __create_persistent_data_objects(cmd, may_format_device);
780         if (r) {
781                 kfree(cmd);
782                 return ERR_PTR(r);
783         }
784
785         r = __begin_transaction_flags(cmd, clear_clean_shutdown);
786         if (r < 0) {
787                 dm_cache_metadata_close(cmd);
788                 return ERR_PTR(r);
789         }
790
791         return cmd;
792 }
793
794 /*
795  * We keep a little list of ref-counted metadata objects to prevent two
796  * different target instances from creating separate bufio instances.  This
797  * is an issue if a table is reloaded before the suspend.
798  */
799 static DEFINE_MUTEX(table_lock);
800 static LIST_HEAD(table);
801
802 static struct dm_cache_metadata *lookup(struct block_device *bdev)
803 {
804         struct dm_cache_metadata *cmd;
805
806         list_for_each_entry(cmd, &table, list)
807                 if (cmd->bdev == bdev) {
808                         refcount_inc(&cmd->ref_count);
809                         return cmd;
810                 }
811
812         return NULL;
813 }
814
815 static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
816                                                 sector_t data_block_size,
817                                                 bool may_format_device,
818                                                 size_t policy_hint_size,
819                                                 unsigned int metadata_version)
820 {
821         struct dm_cache_metadata *cmd, *cmd2;
822
823         mutex_lock(&table_lock);
824         cmd = lookup(bdev);
825         mutex_unlock(&table_lock);
826
827         if (cmd)
828                 return cmd;
829
830         cmd = metadata_open(bdev, data_block_size, may_format_device,
831                             policy_hint_size, metadata_version);
832         if (!IS_ERR(cmd)) {
833                 mutex_lock(&table_lock);
834                 cmd2 = lookup(bdev);
835                 if (cmd2) {
836                         mutex_unlock(&table_lock);
837                         __destroy_persistent_data_objects(cmd, true);
838                         kfree(cmd);
839                         return cmd2;
840                 }
841                 list_add(&cmd->list, &table);
842                 mutex_unlock(&table_lock);
843         }
844
845         return cmd;
846 }
847
848 static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
849 {
850         if (cmd->data_block_size != data_block_size) {
851                 DMERR("data_block_size (%llu) different from that in metadata (%llu)",
852                       (unsigned long long) data_block_size,
853                       (unsigned long long) cmd->data_block_size);
854                 return false;
855         }
856
857         return true;
858 }
859
860 struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
861                                                  sector_t data_block_size,
862                                                  bool may_format_device,
863                                                  size_t policy_hint_size,
864                                                  unsigned int metadata_version)
865 {
866         struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
867                                                        policy_hint_size, metadata_version);
868
869         if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
870                 dm_cache_metadata_close(cmd);
871                 return ERR_PTR(-EINVAL);
872         }
873
874         return cmd;
875 }
876
877 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
878 {
879         if (refcount_dec_and_test(&cmd->ref_count)) {
880                 mutex_lock(&table_lock);
881                 list_del(&cmd->list);
882                 mutex_unlock(&table_lock);
883
884                 if (!cmd->fail_io)
885                         __destroy_persistent_data_objects(cmd, true);
886                 kfree(cmd);
887         }
888 }
889
890 /*
891  * Checks that the given cache block is either unmapped or clean.
892  */
893 static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b,
894                                       bool *result)
895 {
896         int r;
897         __le64 value;
898         dm_oblock_t ob;
899         unsigned int flags;
900
901         r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
902         if (r)
903                 return r;
904
905         unpack_value(value, &ob, &flags);
906         *result = !((flags & M_VALID) && (flags & M_DIRTY));
907
908         return 0;
909 }
910
911 static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd,
912                                            dm_cblock_t begin, dm_cblock_t end,
913                                            bool *result)
914 {
915         int r;
916         *result = true;
917
918         while (begin != end) {
919                 r = block_clean_combined_dirty(cmd, begin, result);
920                 if (r) {
921                         DMERR("block_clean_combined_dirty failed");
922                         return r;
923                 }
924
925                 if (!*result) {
926                         DMERR("cache block %llu is dirty",
927                               (unsigned long long) from_cblock(begin));
928                         return 0;
929                 }
930
931                 begin = to_cblock(from_cblock(begin) + 1);
932         }
933
934         return 0;
935 }
936
937 static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
938                                            dm_cblock_t begin, dm_cblock_t end,
939                                            bool *result)
940 {
941         int r;
942         bool dirty_flag;
943         *result = true;
944
945         if (from_cblock(cmd->cache_blocks) == 0)
946                 /* Nothing to do */
947                 return 0;
948
949         r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
950                                    from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
951         if (r) {
952                 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
953                 return r;
954         }
955
956         r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));
957         if (r) {
958                 DMERR("%s: dm_bitset_cursor_skip for dirty failed", __func__);
959                 dm_bitset_cursor_end(&cmd->dirty_cursor);
960                 return r;
961         }
962
963         while (begin != end) {
964                 /*
965                  * We assume that unmapped blocks have their dirty bit
966                  * cleared.
967                  */
968                 dirty_flag = dm_bitset_cursor_get_value(&cmd->dirty_cursor);
969                 if (dirty_flag) {
970                         DMERR("%s: cache block %llu is dirty", __func__,
971                               (unsigned long long) from_cblock(begin));
972                         dm_bitset_cursor_end(&cmd->dirty_cursor);
973                         *result = false;
974                         return 0;
975                 }
976
977                 begin = to_cblock(from_cblock(begin) + 1);
978                 if (begin == end)
979                         break;
980
981                 r = dm_bitset_cursor_next(&cmd->dirty_cursor);
982                 if (r) {
983                         DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
984                         dm_bitset_cursor_end(&cmd->dirty_cursor);
985                         return r;
986                 }
987         }
988
989         dm_bitset_cursor_end(&cmd->dirty_cursor);
990
991         return 0;
992 }
993
994 static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
995                                         dm_cblock_t begin, dm_cblock_t end,
996                                         bool *result)
997 {
998         if (separate_dirty_bits(cmd))
999                 return blocks_are_clean_separate_dirty(cmd, begin, end, result);
1000         else
1001                 return blocks_are_clean_combined_dirty(cmd, begin, end, result);
1002 }
1003
1004 static bool cmd_write_lock(struct dm_cache_metadata *cmd)
1005 {
1006         down_write(&cmd->root_lock);
1007         if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
1008                 up_write(&cmd->root_lock);
1009                 return false;
1010         }
1011         return true;
1012 }
1013
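/*
 * cmd_write_lock() refuses if the metadata has failed or the block manager
 * has been put into read-only mode; cmd_read_lock() below refuses only on
 * failure.  Note that the macros return from the *calling* function (with
 * -EINVAL, or a bare return for the _VOID variants), so every public entry
 * point gets these checks implicitly.
 */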
1014 #define WRITE_LOCK(cmd)                         \
1015         do {                                    \
1016                 if (!cmd_write_lock((cmd)))     \
1017                         return -EINVAL;         \
1018         } while (0)
1019
1020 #define WRITE_LOCK_VOID(cmd)                    \
1021         do {                                    \
1022                 if (!cmd_write_lock((cmd)))     \
1023                         return;                 \
1024         } while (0)
1025
1026 #define WRITE_UNLOCK(cmd) \
1027         up_write(&(cmd)->root_lock)
1028
1029 static bool cmd_read_lock(struct dm_cache_metadata *cmd)
1030 {
1031         down_read(&cmd->root_lock);
1032         if (cmd->fail_io) {
1033                 up_read(&cmd->root_lock);
1034                 return false;
1035         }
1036         return true;
1037 }
1038
1039 #define READ_LOCK(cmd)                          \
1040         do {                                    \
1041                 if (!cmd_read_lock((cmd)))      \
1042                         return -EINVAL;         \
1043         } while (0)
1044
1045 #define READ_LOCK_VOID(cmd)                     \
1046         do {                                    \
1047                 if (!cmd_read_lock((cmd)))      \
1048                         return;                 \
1049         } while (0)
1050
1051 #define READ_UNLOCK(cmd) \
1052         up_read(&(cmd)->root_lock)
1053
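/*
 * Shrinking the cache is only permitted if every block beyond the new size
 * is unmapped or clean; otherwise dirty data that hasn't been written back
 * would be lost.
 */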
1054 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
1055 {
1056         int r;
1057         bool clean;
1058         __le64 null_mapping = pack_value(0, 0);
1059
1060         WRITE_LOCK(cmd);
1061         __dm_bless_for_disk(&null_mapping);
1062
1063         if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
1064                 r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
1065                 if (r) {
1066                         __dm_unbless_for_disk(&null_mapping);
1067                         goto out;
1068                 }
1069
1070                 if (!clean) {
1071                         DMERR("unable to shrink cache due to dirty blocks");
1072                         r = -EINVAL;
1073                         __dm_unbless_for_disk(&null_mapping);
1074                         goto out;
1075                 }
1076         }
1077
1078         r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
1079                             from_cblock(new_cache_size),
1080                             &null_mapping, &cmd->root);
1081         if (r)
1082                 goto out;
1083
1084         if (separate_dirty_bits(cmd)) {
1085                 r = dm_bitset_resize(&cmd->dirty_info, cmd->dirty_root,
1086                                      from_cblock(cmd->cache_blocks), from_cblock(new_cache_size),
1087                                      false, &cmd->dirty_root);
1088                 if (r)
1089                         goto out;
1090         }
1091
1092         cmd->cache_blocks = new_cache_size;
1093         cmd->changed = true;
1094
1095 out:
1096         WRITE_UNLOCK(cmd);
1097
1098         return r;
1099 }
1100
1101 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
1102                                    sector_t discard_block_size,
1103                                    dm_dblock_t new_nr_entries)
1104 {
1105         int r;
1106
1107         WRITE_LOCK(cmd);
1108         r = dm_bitset_resize(&cmd->discard_info,
1109                              cmd->discard_root,
1110                              from_dblock(cmd->discard_nr_blocks),
1111                              from_dblock(new_nr_entries),
1112                              false, &cmd->discard_root);
1113         if (!r) {
1114                 cmd->discard_block_size = discard_block_size;
1115                 cmd->discard_nr_blocks = new_nr_entries;
1116         }
1117
1118         cmd->changed = true;
1119         WRITE_UNLOCK(cmd);
1120
1121         return r;
1122 }
1123
1124 static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1125 {
1126         return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
1127                                  from_dblock(b), &cmd->discard_root);
1128 }
1129
1130 static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1131 {
1132         return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
1133                                    from_dblock(b), &cmd->discard_root);
1134 }
1135
1136 static int __discard(struct dm_cache_metadata *cmd,
1137                      dm_dblock_t dblock, bool discard)
1138 {
1139         int r;
1140
1141         r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
1142         if (r)
1143                 return r;
1144
1145         cmd->changed = true;
1146         return 0;
1147 }
1148
1149 int dm_cache_set_discard(struct dm_cache_metadata *cmd,
1150                          dm_dblock_t dblock, bool discard)
1151 {
1152         int r;
1153
1154         WRITE_LOCK(cmd);
1155         r = __discard(cmd, dblock, discard);
1156         WRITE_UNLOCK(cmd);
1157
1158         return r;
1159 }
1160
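/*
 * If the last shutdown was clean the on-disk discard bitset is flushed and
 * walked with a cursor.  Otherwise its contents can't be trusted, so every
 * discard block is reported as not discarded.
 */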
1161 static int __load_discards(struct dm_cache_metadata *cmd,
1162                            load_discard_fn fn, void *context)
1163 {
1164         int r = 0;
1165         uint32_t b;
1166         struct dm_bitset_cursor c;
1167
1168         if (from_dblock(cmd->discard_nr_blocks) == 0)
1169                 /* nothing to do */
1170                 return 0;
1171
1172         if (cmd->clean_when_opened) {
1173                 r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root, &cmd->discard_root);
1174                 if (r)
1175                         return r;
1176
1177                 r = dm_bitset_cursor_begin(&cmd->discard_info, cmd->discard_root,
1178                                            from_dblock(cmd->discard_nr_blocks), &c);
1179                 if (r)
1180                         return r;
1181
1182                 for (b = 0; ; b++) {
1183                         r = fn(context, cmd->discard_block_size, to_dblock(b),
1184                                dm_bitset_cursor_get_value(&c));
1185                         if (r)
1186                                 break;
1187
1188                         if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
1189                                 break;
1190
1191                         r = dm_bitset_cursor_next(&c);
1192                         if (r)
1193                                 break;
1194                 }
1195
1196                 dm_bitset_cursor_end(&c);
1197
1198         } else {
1199                 for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1200                         r = fn(context, cmd->discard_block_size, to_dblock(b), false);
1201                         if (r)
1202                                 return r;
1203                 }
1204         }
1205
1206         return r;
1207 }
1208
1209 int dm_cache_load_discards(struct dm_cache_metadata *cmd,
1210                            load_discard_fn fn, void *context)
1211 {
1212         int r;
1213
1214         READ_LOCK(cmd);
1215         r = __load_discards(cmd, fn, context);
1216         READ_UNLOCK(cmd);
1217
1218         return r;
1219 }
1220
1221 int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
1222 {
1223         READ_LOCK(cmd);
1224         *result = cmd->cache_blocks;
1225         READ_UNLOCK(cmd);
1226
1227         return 0;
1228 }
1229
1230 static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1231 {
1232         int r;
1233         __le64 value = pack_value(0, 0);
1234
1235         __dm_bless_for_disk(&value);
1236         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1237                                &value, &cmd->root);
1238         if (r)
1239                 return r;
1240
1241         cmd->changed = true;
1242         return 0;
1243 }
1244
1245 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1246 {
1247         int r;
1248
1249         WRITE_LOCK(cmd);
1250         r = __remove(cmd, cblock);
1251         WRITE_UNLOCK(cmd);
1252
1253         return r;
1254 }
1255
1256 static int __insert(struct dm_cache_metadata *cmd,
1257                     dm_cblock_t cblock, dm_oblock_t oblock)
1258 {
1259         int r;
1260         __le64 value = pack_value(oblock, M_VALID);
1261
1262         __dm_bless_for_disk(&value);
1263
1264         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1265                                &value, &cmd->root);
1266         if (r)
1267                 return r;
1268
1269         cmd->changed = true;
1270         return 0;
1271 }
1272
1273 int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
1274                             dm_cblock_t cblock, dm_oblock_t oblock)
1275 {
1276         int r;
1277
1278         WRITE_LOCK(cmd);
1279         r = __insert(cmd, cblock, oblock);
1280         WRITE_UNLOCK(cmd);
1281
1282         return r;
1283 }
1284
1285 static bool policy_unchanged(struct dm_cache_metadata *cmd,
1286                              struct dm_cache_policy *policy)
1287 {
1288         const char *policy_name = dm_cache_policy_get_name(policy);
1289         const unsigned int *policy_version = dm_cache_policy_get_version(policy);
1290         size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
1291
1292         /*
1293          * Ensure policy names match.
1294          */
1295         if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
1296                 return false;
1297
1298         /*
1299          * Ensure policy major versions match.
1300          */
1301         if (cmd->policy_version[0] != policy_version[0])
1302                 return false;
1303
1304         /*
1305          * Ensure policy hint sizes match.
1306          */
1307         if (cmd->policy_hint_size != policy_hint_size)
1308                 return false;
1309
1310         return true;
1311 }
1312
1313 static bool hints_array_initialized(struct dm_cache_metadata *cmd)
1314 {
1315         return cmd->hint_root && cmd->policy_hint_size;
1316 }
1317
1318 static bool hints_array_available(struct dm_cache_metadata *cmd,
1319                                   struct dm_cache_policy *policy)
1320 {
1321         return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
1322                 hints_array_initialized(cmd);
1323 }
1324
1325 static int __load_mapping_v1(struct dm_cache_metadata *cmd,
1326                              uint64_t cb, bool hints_valid,
1327                              struct dm_array_cursor *mapping_cursor,
1328                              struct dm_array_cursor *hint_cursor,
1329                              load_mapping_fn fn, void *context)
1330 {
1331         int r = 0;
1332
1333         __le64 mapping;
1334         __le32 hint = 0;
1335
1336         __le64 *mapping_value_le;
1337         __le32 *hint_value_le;
1338
1339         dm_oblock_t oblock;
1340         unsigned int flags;
1341         bool dirty = true;
1342
1343         dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1344         memcpy(&mapping, mapping_value_le, sizeof(mapping));
1345         unpack_value(mapping, &oblock, &flags);
1346
1347         if (flags & M_VALID) {
1348                 if (hints_valid) {
1349                         dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1350                         memcpy(&hint, hint_value_le, sizeof(hint));
1351                 }
1352                 if (cmd->clean_when_opened)
1353                         dirty = flags & M_DIRTY;
1354
1355                 r = fn(context, oblock, to_cblock(cb), dirty,
1356                        le32_to_cpu(hint), hints_valid);
1357                 if (r) {
1358                         DMERR("policy couldn't load cache block %llu",
1359                               (unsigned long long) from_cblock(to_cblock(cb)));
1360                 }
1361         }
1362
1363         return r;
1364 }
1365
1366 static int __load_mapping_v2(struct dm_cache_metadata *cmd,
1367                              uint64_t cb, bool hints_valid,
1368                              struct dm_array_cursor *mapping_cursor,
1369                              struct dm_array_cursor *hint_cursor,
1370                              struct dm_bitset_cursor *dirty_cursor,
1371                              load_mapping_fn fn, void *context)
1372 {
1373         int r = 0;
1374
1375         __le64 mapping;
1376         __le32 hint = 0;
1377
1378         __le64 *mapping_value_le;
1379         __le32 *hint_value_le;
1380
1381         dm_oblock_t oblock;
1382         unsigned int flags;
1383         bool dirty = true;
1384
1385         dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1386         memcpy(&mapping, mapping_value_le, sizeof(mapping));
1387         unpack_value(mapping, &oblock, &flags);
1388
1389         if (flags & M_VALID) {
1390                 if (hints_valid) {
1391                         dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1392                         memcpy(&hint, hint_value_le, sizeof(hint));
1393                 }
1394                 if (cmd->clean_when_opened)
1395                         dirty = dm_bitset_cursor_get_value(dirty_cursor);
1396
1397                 r = fn(context, oblock, to_cblock(cb), dirty,
1398                        le32_to_cpu(hint), hints_valid);
1399                 if (r) {
1400                         DMERR("policy couldn't load cache block %llu",
1401                               (unsigned long long) from_cblock(to_cblock(cb)));
1402                 }
1403         }
1404
1405         return r;
1406 }
1407
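/*
 * The mapping, hint and (format 2) dirty cursors are advanced in lockstep,
 * one entry per cache block.  Hints are only passed to the policy if the
 * device was shut down cleanly and the policy name, major version and hint
 * size are unchanged (see hints_array_available()).
 */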
1408 static int __load_mappings(struct dm_cache_metadata *cmd,
1409                            struct dm_cache_policy *policy,
1410                            load_mapping_fn fn, void *context)
1411 {
1412         int r;
1413         uint64_t cb;
1414
1415         bool hints_valid = hints_array_available(cmd, policy);
1416
1417         if (from_cblock(cmd->cache_blocks) == 0)
1418                 /* Nothing to do */
1419                 return 0;
1420
1421         r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
1422         if (r)
1423                 return r;
1424
1425         if (hints_valid) {
1426                 r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
1427                 if (r) {
1428                         dm_array_cursor_end(&cmd->mapping_cursor);
1429                         return r;
1430                 }
1431         }
1432
1433         if (separate_dirty_bits(cmd)) {
1434                 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
1435                                            from_cblock(cmd->cache_blocks),
1436                                            &cmd->dirty_cursor);
1437                 if (r) {
1438                         dm_array_cursor_end(&cmd->hint_cursor);
1439                         dm_array_cursor_end(&cmd->mapping_cursor);
1440                         return r;
1441                 }
1442         }
1443
1444         for (cb = 0; ; cb++) {
1445                 if (separate_dirty_bits(cmd))
1446                         r = __load_mapping_v2(cmd, cb, hints_valid,
1447                                               &cmd->mapping_cursor,
1448                                               &cmd->hint_cursor,
1449                                               &cmd->dirty_cursor,
1450                                               fn, context);
1451                 else
1452                         r = __load_mapping_v1(cmd, cb, hints_valid,
1453                                               &cmd->mapping_cursor, &cmd->hint_cursor,
1454                                               fn, context);
1455                 if (r)
1456                         goto out;
1457
1458                 /*
1459                  * We need to break out before we move the cursors.
1460                  */
1461                 if (cb >= (from_cblock(cmd->cache_blocks) - 1))
1462                         break;
1463
1464                 r = dm_array_cursor_next(&cmd->mapping_cursor);
1465                 if (r) {
1466                         DMERR("dm_array_cursor_next for mapping failed");
1467                         goto out;
1468                 }
1469
1470                 if (hints_valid) {
1471                         r = dm_array_cursor_next(&cmd->hint_cursor);
1472                         if (r) {
1473                                 dm_array_cursor_end(&cmd->hint_cursor);
1474                                 hints_valid = false;
1475                         }
1476                 }
1477
1478                 if (separate_dirty_bits(cmd)) {
1479                         r = dm_bitset_cursor_next(&cmd->dirty_cursor);
1480                         if (r) {
1481                                 DMERR("dm_bitset_cursor_next for dirty failed");
1482                                 goto out;
1483                         }
1484                 }
1485         }
1486 out:
1487         dm_array_cursor_end(&cmd->mapping_cursor);
1488         if (hints_valid)
1489                 dm_array_cursor_end(&cmd->hint_cursor);
1490
1491         if (separate_dirty_bits(cmd))
1492                 dm_bitset_cursor_end(&cmd->dirty_cursor);
1493
1494         return r;
1495 }
1496
1497 int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
1498                            struct dm_cache_policy *policy,
1499                            load_mapping_fn fn, void *context)
1500 {
1501         int r;
1502
1503         READ_LOCK(cmd);
1504         r = __load_mappings(cmd, policy, fn, context);
1505         READ_UNLOCK(cmd);
1506
1507         return r;
1508 }
1509
1510 static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
1511 {
1512         __le64 value;
1513         dm_oblock_t oblock;
1514         unsigned int flags;
1515
1516         memcpy(&value, leaf, sizeof(value));
1517         unpack_value(value, &oblock, &flags);
1518
1519         return 0;
1520 }
1521
1522 static int __dump_mappings(struct dm_cache_metadata *cmd)
1523 {
1524         return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
1525 }
1526
1527 void dm_cache_dump(struct dm_cache_metadata *cmd)
1528 {
1529         READ_LOCK_VOID(cmd);
1530         __dump_mappings(cmd);
1531         READ_UNLOCK(cmd);
1532 }
1533
1534 int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
1535 {
1536         int r;
1537
1538         READ_LOCK(cmd);
1539         r = cmd->changed;
1540         READ_UNLOCK(cmd);
1541
1542         return r;
1543 }
1544
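/*
 * Format 1 only: the dirty state lives in the packed mapping value, so
 * changing it means re-packing and rewriting the array entry.
 */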
1545 static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
1546 {
1547         int r;
1548         unsigned int flags;
1549         dm_oblock_t oblock;
1550         __le64 value;
1551
1552         r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
1553         if (r)
1554                 return r;
1555
1556         unpack_value(value, &oblock, &flags);
1557
1558         if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
1559                 /* nothing to be done */
1560                 return 0;
1561
1562         value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
1563         __dm_bless_for_disk(&value);
1564
1565         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1566                                &value, &cmd->root);
1567         if (r)
1568                 return r;
1569
1570         cmd->changed = true;
1571         return 0;
1572
1573 }
1574
1575 static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
1576 {
1577         int r;
1578         unsigned int i;
1579
1580         for (i = 0; i < nr_bits; i++) {
1581                 r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
1582                 if (r)
1583                         return r;
1584         }
1585
1586         return 0;
1587 }
1588
1589 static int is_dirty_callback(uint32_t index, bool *value, void *context)
1590 {
1591         unsigned long *bits = context;
1592         *value = test_bit(index, bits);
1593         return 0;
1594 }
1595
1596 static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
1597 {
1598         int r = 0;
1599
1600         /* nr_bits is really just a sanity check */
1601         if (nr_bits != from_cblock(cmd->cache_blocks)) {
1602                 DMERR("dirty bitset is wrong size");
1603                 return -EINVAL;
1604         }
1605
1606         r = dm_bitset_del(&cmd->dirty_info, cmd->dirty_root);
1607         if (r)
1608                 return r;
1609
1610         cmd->changed = true;
1611         return dm_bitset_new(&cmd->dirty_info, &cmd->dirty_root, nr_bits, is_dirty_callback, bits);
1612 }
1613
1614 int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
1615                             unsigned int nr_bits,
1616                             unsigned long *bits)
1617 {
1618         int r;
1619
1620         WRITE_LOCK(cmd);
1621         if (separate_dirty_bits(cmd))
1622                 r = __set_dirty_bits_v2(cmd, nr_bits, bits);
1623         else
1624                 r = __set_dirty_bits_v1(cmd, nr_bits, bits);
1625         WRITE_UNLOCK(cmd);
1626
1627         return r;
1628 }
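
/*
 * A minimal usage sketch for dm_cache_set_dirty_bits(): the caller supplies
 * one bit per cache block, and the count must match the metadata's idea of
 * cache_blocks or the v2 path above returns -EINVAL.  bitmap_zalloc() comes
 * from <linux/bitmap.h>; error handling is trimmed for brevity.
 */
static int example_mark_all_clean(struct dm_cache_metadata *cmd,
				  unsigned int nr_cache_blocks)
{
	int r;
	unsigned long *bits;

	/* all bits clear == every cache block clean */
	bits = bitmap_zalloc(nr_cache_blocks, GFP_KERNEL);
	if (!bits)
		return -ENOMEM;

	r = dm_cache_set_dirty_bits(cmd, nr_cache_blocks, bits);

	bitmap_free(bits);
	return r;
}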
1629
1630 void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
1631                                  struct dm_cache_statistics *stats)
1632 {
1633         READ_LOCK_VOID(cmd);
1634         *stats = cmd->stats;
1635         READ_UNLOCK(cmd);
1636 }
1637
1638 void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
1639                                  struct dm_cache_statistics *stats)
1640 {
1641         WRITE_LOCK_VOID(cmd);
1642         cmd->stats = *stats;
1643         WRITE_UNLOCK(cmd);
1644 }
1645
1646 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
1647 {
1648         int r = -EINVAL;
1649         flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
1650                                  clear_clean_shutdown);
1651
1652         WRITE_LOCK(cmd);
1653         if (cmd->fail_io)
1654                 goto out;
1655
1656         r = __commit_transaction(cmd, mutator);
1657         if (r)
1658                 goto out;
1659
1660         r = __begin_transaction(cmd);
1661 out:
1662         WRITE_UNLOCK(cmd);
1663         return r;
1664 }
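
/*
 * A minimal sketch of the commit discipline a caller might follow: periodic
 * commits while live leave CLEAN_SHUTDOWN clear, and only the final commit on
 * an orderly suspend passes clean_shutdown = true; if that flag is missing on
 * the next open, the on-disk dirty information cannot be trusted.
 */
static int example_periodic_commit(struct dm_cache_metadata *cmd)
{
	return dm_cache_commit(cmd, false);
}

static int example_shutdown_commit(struct dm_cache_metadata *cmd)
{
	return dm_cache_commit(cmd, true);	/* sets CLEAN_SHUTDOWN in the superblock */
}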
1665
1666 int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
1667                                            dm_block_t *result)
1668 {
1669         int r = -EINVAL;
1670
1671         READ_LOCK(cmd);
1672         if (!cmd->fail_io)
1673                 r = dm_sm_get_nr_free(cmd->metadata_sm, result);
1674         READ_UNLOCK(cmd);
1675
1676         return r;
1677 }
1678
1679 int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
1680                                    dm_block_t *result)
1681 {
1682         int r = -EINVAL;
1683
1684         READ_LOCK(cmd);
1685         if (!cmd->fail_io)
1686                 r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
1687         READ_UNLOCK(cmd);
1688
1689         return r;
1690 }
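
/*
 * A minimal sketch of a free-space check a caller might run after each
 * commit; the 10% threshold is purely illustrative.
 */
static void example_check_metadata_space(struct dm_cache_metadata *cmd)
{
	int r;
	dm_block_t nr_free = 0, nr_total = 0;

	r = dm_cache_get_free_metadata_block_count(cmd, &nr_free);
	if (!r)
		r = dm_cache_get_metadata_dev_size(cmd, &nr_total);
	if (r)
		return;

	if (nr_total && nr_free < nr_total / 10)
		DMWARN("metadata device is low on space (%llu of %llu blocks free)",
		       (unsigned long long) nr_free,
		       (unsigned long long) nr_total);
}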
1691
1692 /*----------------------------------------------------------------*/
1693
1694 static int get_hint(uint32_t index, void *value_le, void *context)
1695 {
1696         uint32_t value;
1697         struct dm_cache_policy *policy = context;
1698
1699         value = policy_get_hint(policy, to_cblock(index));
1700         *((__le32 *) value_le) = cpu_to_le32(value);
1701
1702         return 0;
1703 }
1704
1705 /*
1706  * It's quicker to always delete the hint array and recreate it with
1707  * dm_array_new() than to update the existing entries in place.
1708  */
1709 static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1710 {
1711         int r;
1712         size_t hint_size;
1713         const char *policy_name = dm_cache_policy_get_name(policy);
1714         const unsigned int *policy_version = dm_cache_policy_get_version(policy);
1715
1716         if (!policy_name[0] ||
1717             (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
1718                 return -EINVAL;
1719
1720         strscpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
1721         memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
1722
1723         hint_size = dm_cache_policy_get_hint_size(policy);
1724         if (!hint_size)
1725                 return 0; /* short-circuit hints initialization */
1726         cmd->policy_hint_size = hint_size;
1727
1728         if (cmd->hint_root) {
1729                 r = dm_array_del(&cmd->hint_info, cmd->hint_root);
1730                 if (r)
1731                         return r;
1732         }
1733
1734         return dm_array_new(&cmd->hint_info, &cmd->hint_root,
1735                             from_cblock(cmd->cache_blocks),
1736                             get_hint, policy);
1737 }
1738
1739 int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1740 {
1741         int r;
1742
1743         WRITE_LOCK(cmd);
1744         r = write_hints(cmd, policy);
1745         WRITE_UNLOCK(cmd);
1746
1747         return r;
1748 }
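
/*
 * A minimal sketch of the ordering a caller might use on suspend: write the
 * policy's hints first, then commit with clean_shutdown set, so the hint
 * array and the clean/dirty state become durable in the same transaction.
 */
static int example_suspend_metadata(struct dm_cache_metadata *cmd,
				    struct dm_cache_policy *policy)
{
	int r;

	r = dm_cache_write_hints(cmd, policy);
	if (r)
		return r;

	return dm_cache_commit(cmd, true);
}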
1749
1750 int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
1751 {
1752         int r;
1753
1754         READ_LOCK(cmd);
1755         r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
1756         READ_UNLOCK(cmd);
1757
1758         return r;
1759 }
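
/*
 * A minimal sketch of the kind of gating a caller might build on this, e.g.
 * refusing to enter a mode that bypasses the cache until every mapped block
 * is known to be clean.  Illustrative only.
 */
static int example_require_all_clean(struct dm_cache_metadata *cmd)
{
	int r;
	bool clean = false;

	r = dm_cache_metadata_all_clean(cmd, &clean);
	if (r)
		return r;

	return clean ? 0 : -EBUSY;
}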
1760
1761 void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
1762 {
1763         WRITE_LOCK_VOID(cmd);
1764         dm_bm_set_read_only(cmd->bm);
1765         WRITE_UNLOCK(cmd);
1766 }
1767
1768 void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
1769 {
1770         WRITE_LOCK_VOID(cmd);
1771         dm_bm_set_read_write(cmd->bm);
1772         WRITE_UNLOCK(cmd);
1773 }
1774
1775 int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
1776 {
1777         int r;
1778         struct dm_block *sblock;
1779         struct cache_disk_superblock *disk_super;
1780
1781         WRITE_LOCK(cmd);
1782         set_bit(NEEDS_CHECK, &cmd->flags);
1783
1784         r = superblock_lock(cmd, &sblock);
1785         if (r) {
1786                 DMERR("couldn't lock superblock");
1787                 goto out;
1788         }
1789
1790         disk_super = dm_block_data(sblock);
1791         disk_super->flags = cpu_to_le32(cmd->flags);
1792
1793         dm_bm_unlock(sblock);
1794
1795 out:
1796         WRITE_UNLOCK(cmd);
1797         return r;
1798 }
1799
1800 int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
1801 {
1802         READ_LOCK(cmd);
1803         *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
1804         READ_UNLOCK(cmd);
1805
1806         return 0;
1807 }
1808
1809 int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
1810 {
1811         int r = -EINVAL;
1812         struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
1813
1814         /* fail_io is double-checked with cmd->root_lock held below */
1815         if (unlikely(cmd->fail_io))
1816                 return r;
1817
1818         /*
1819          * The replacement block manager (new_bm) is created, and old_bm destroyed,
1820          * outside of cmd->root_lock to avoid the ABBA deadlock that the life-cycle
1821          * of the shrinker tied to the block manager's bufio client would otherwise
1822          * cause: shrinker_mutex must be taken without holding cmd->root_lock.
1823          */
1824         new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
1825                                          CACHE_MAX_CONCURRENT_LOCKS);
1826
1827         WRITE_LOCK(cmd);
1828         if (cmd->fail_io) {
1829                 WRITE_UNLOCK(cmd);
1830                 goto out;
1831         }
1832
1833         __destroy_persistent_data_objects(cmd, false);
1834         old_bm = cmd->bm;
1835         if (IS_ERR(new_bm)) {
1836                 DMERR("could not create block manager during abort");
1837                 cmd->bm = NULL;
1838                 r = PTR_ERR(new_bm);
1839                 goto out_unlock;
1840         }
1841
1842         cmd->bm = new_bm;
1843         r = __open_or_format_metadata(cmd, false);
1844         if (r) {
1845                 cmd->bm = NULL;
1846                 goto out_unlock;
1847         }
1848         new_bm = NULL;
1849 out_unlock:
1850         if (r)
1851                 cmd->fail_io = true;
1852         WRITE_UNLOCK(cmd);
1853         dm_block_manager_destroy(old_bm);
1854 out:
1855         if (new_bm && !IS_ERR(new_bm))
1856                 dm_block_manager_destroy(new_bm);
1857
1858         return r;
1859 }
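
/*
 * A minimal sketch of an error path a caller might take when a commit fails:
 * roll the metadata back to the last committed transaction and request an
 * offline check.  enter_fail_mode() is a hypothetical hook standing in for
 * whatever read-only/fail handling the caller implements.
 */
static void example_abort_transaction(struct dm_cache_metadata *cmd,
				      void (*enter_fail_mode)(void *),
				      void *context)
{
	if (dm_cache_metadata_abort(cmd)) {
		DMERR("failed to abort metadata transaction");
		enter_fail_mode(context);
	}

	if (dm_cache_metadata_set_needs_check(cmd)) {
		DMERR("failed to set 'needs_check' flag in metadata");
		enter_fail_mode(context);
	}
}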