drivers/md/dm-cache-metadata.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 Red Hat, Inc.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include "dm-cache-metadata.h"
9
10 #include "persistent-data/dm-array.h"
11 #include "persistent-data/dm-bitset.h"
12 #include "persistent-data/dm-space-map.h"
13 #include "persistent-data/dm-space-map-disk.h"
14 #include "persistent-data/dm-transaction-manager.h"
15
16 #include <linux/device-mapper.h>
17 #include <linux/refcount.h>
18
19 /*----------------------------------------------------------------*/
20
21 #define DM_MSG_PREFIX   "cache metadata"
22
23 #define CACHE_SUPERBLOCK_MAGIC 06142003
24 #define CACHE_SUPERBLOCK_LOCATION 0
25
26 /*
27  * defines a range of metadata versions that this module can handle.
28  */
29 #define MIN_CACHE_VERSION 1
30 #define MAX_CACHE_VERSION 2
31
32 /*
33  *  3 for btree insert +
34  *  2 for btree lookup used within space map
35  */
36 #define CACHE_MAX_CONCURRENT_LOCKS 5
37 #define SPACE_MAP_ROOT_SIZE 128
38
39 enum superblock_flag_bits {
40         /* for spotting crashes that would invalidate the dirty bitset */
41         CLEAN_SHUTDOWN,
42         /* metadata must be checked using the tools */
43         NEEDS_CHECK,
44 };
45
46 /*
47  * Each mapping from cache block -> origin block carries a set of flags.
48  */
49 enum mapping_bits {
50         /*
51          * A valid mapping.  Because we're using an array we clear this
52          * flag for a non-existent mapping.
53          */
54         M_VALID = 1,
55
56         /*
57          * The data on the cache is different from that on the origin.
58          * This flag is only used by metadata format 1.
59          */
60         M_DIRTY = 2
61 };
62
63 struct cache_disk_superblock {
64         __le32 csum;
65         __le32 flags;
66         __le64 blocknr;
67
68         __u8 uuid[16];
69         __le64 magic;
70         __le32 version;
71
72         __u8 policy_name[CACHE_POLICY_NAME_SIZE];
73         __le32 policy_hint_size;
74
75         __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
76         __le64 mapping_root;
77         __le64 hint_root;
78
79         __le64 discard_root;
80         __le64 discard_block_size;
81         __le64 discard_nr_blocks;
82
83         __le32 data_block_size;
84         __le32 metadata_block_size;
85         __le32 cache_blocks;
86
87         __le32 compat_flags;
88         __le32 compat_ro_flags;
89         __le32 incompat_flags;
90
91         __le32 read_hits;
92         __le32 read_misses;
93         __le32 write_hits;
94         __le32 write_misses;
95
96         __le32 policy_version[CACHE_POLICY_VERSION_SIZE];
97
98         /*
99          * Metadata format 2 fields.
100          */
101         __le64 dirty_root;
102 } __packed;
103
104 struct dm_cache_metadata {
105         refcount_t ref_count;
106         struct list_head list;
107
108         unsigned int version;
109         struct block_device *bdev;
110         struct dm_block_manager *bm;
111         struct dm_space_map *metadata_sm;
112         struct dm_transaction_manager *tm;
113
114         struct dm_array_info info;
115         struct dm_array_info hint_info;
116         struct dm_disk_bitset discard_info;
117
118         struct rw_semaphore root_lock;
119         unsigned long flags;
120         dm_block_t root;
121         dm_block_t hint_root;
122         dm_block_t discard_root;
123
124         sector_t discard_block_size;
125         dm_dblock_t discard_nr_blocks;
126
127         sector_t data_block_size;
128         dm_cblock_t cache_blocks;
129         bool changed:1;
130         bool clean_when_opened:1;
131
132         char policy_name[CACHE_POLICY_NAME_SIZE];
133         unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
134         size_t policy_hint_size;
135         struct dm_cache_statistics stats;
136
137         /*
138          * Reading the space map root can fail, so we read it into this
139          * buffer before the superblock is locked and updated.
140          */
141         __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
142
143         /*
144          * Set if a transaction has to be aborted but the attempt to roll
145          * back to the previous (good) transaction failed.  The only
146          * metadata operation permissible in this state is the closing of
147          * the device.
148          */
149         bool fail_io:1;
150
151         /*
152          * Metadata format 2 fields.
153          */
154         dm_block_t dirty_root;
155         struct dm_disk_bitset dirty_info;
156
157         /*
158          * These structures are used when loading metadata.  They're too
159          * big to put on the stack.
160          */
161         struct dm_array_cursor mapping_cursor;
162         struct dm_array_cursor hint_cursor;
163         struct dm_bitset_cursor dirty_cursor;
164 };
165
166 /*
167  *-----------------------------------------------------------------
168  * superblock validator
169  *-----------------------------------------------------------------
170  */
171 #define SUPERBLOCK_CSUM_XOR 9031977
172
173 static void sb_prepare_for_write(const struct dm_block_validator *v,
174                                  struct dm_block *b,
175                                  size_t sb_block_size)
176 {
177         struct cache_disk_superblock *disk_super = dm_block_data(b);
178
179         disk_super->blocknr = cpu_to_le64(dm_block_location(b));
180         disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
181                                                       sb_block_size - sizeof(__le32),
182                                                       SUPERBLOCK_CSUM_XOR));
183 }
184
185 static int check_metadata_version(struct cache_disk_superblock *disk_super)
186 {
187         uint32_t metadata_version = le32_to_cpu(disk_super->version);
188
189         if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
190                 DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
191                       metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
192                 return -EINVAL;
193         }
194
195         return 0;
196 }
197
198 static int sb_check(const struct dm_block_validator *v,
199                     struct dm_block *b,
200                     size_t sb_block_size)
201 {
202         struct cache_disk_superblock *disk_super = dm_block_data(b);
203         __le32 csum_le;
204
205         if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
206                 DMERR("%s failed: blocknr %llu: wanted %llu",
207                       __func__, le64_to_cpu(disk_super->blocknr),
208                       (unsigned long long)dm_block_location(b));
209                 return -ENOTBLK;
210         }
211
212         if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
213                 DMERR("%s failed: magic %llu: wanted %llu",
214                       __func__, le64_to_cpu(disk_super->magic),
215                       (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
216                 return -EILSEQ;
217         }
218
219         csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
220                                              sb_block_size - sizeof(__le32),
221                                              SUPERBLOCK_CSUM_XOR));
222         if (csum_le != disk_super->csum) {
223                 DMERR("%s failed: csum %u: wanted %u",
224                       __func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
225                 return -EILSEQ;
226         }
227
228         return check_metadata_version(disk_super);
229 }
230
231 static const struct dm_block_validator sb_validator = {
232         .name = "superblock",
233         .prepare_for_write = sb_prepare_for_write,
234         .check = sb_check
235 };
236
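/*
 * Illustrative sketch only (nothing in this file calls it): how the csum
 * written by sb_prepare_for_write() is meant to be re-checked.  The checksum
 * skips the csum field itself by starting at ->flags, exactly as in
 * sb_check() above; sb_block_size is the metadata block size handed to the
 * validator.
 */
static bool __maybe_unused sb_csum_ok(struct cache_disk_superblock *disk_super,
				      size_t sb_block_size)
{
	__le32 csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						    sb_block_size - sizeof(__le32),
						    SUPERBLOCK_CSUM_XOR));

	return csum_le == disk_super->csum;
}
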
237 /*----------------------------------------------------------------*/
238
239 static int superblock_read_lock(struct dm_cache_metadata *cmd,
240                                 struct dm_block **sblock)
241 {
242         return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
243                                &sb_validator, sblock);
244 }
245
246 static int superblock_lock_zero(struct dm_cache_metadata *cmd,
247                                 struct dm_block **sblock)
248 {
249         return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
250                                      &sb_validator, sblock);
251 }
252
253 static int superblock_lock(struct dm_cache_metadata *cmd,
254                            struct dm_block **sblock)
255 {
256         return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
257                                 &sb_validator, sblock);
258 }
259
260 /*----------------------------------------------------------------*/
261
262 static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
263 {
264         int r;
265         unsigned int i;
266         struct dm_block *b;
267         __le64 *data_le, zero = cpu_to_le64(0);
268         unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
269
270         /*
271          * We can't use a validator here - it may be all zeroes.
272          */
273         r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
274         if (r)
275                 return r;
276
277         data_le = dm_block_data(b);
278         *result = true;
279         for (i = 0; i < sb_block_size; i++) {
280                 if (data_le[i] != zero) {
281                         *result = false;
282                         break;
283                 }
284         }
285
286         dm_bm_unlock(b);
287
288         return 0;
289 }
290
291 static void __setup_mapping_info(struct dm_cache_metadata *cmd)
292 {
293         struct dm_btree_value_type vt;
294
295         vt.context = NULL;
296         vt.size = sizeof(__le64);
297         vt.inc = NULL;
298         vt.dec = NULL;
299         vt.equal = NULL;
300         dm_array_info_init(&cmd->info, cmd->tm, &vt);
301
302         if (cmd->policy_hint_size) {
303                 vt.size = sizeof(__le32);
304                 dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
305         }
306 }
307
308 static int __save_sm_root(struct dm_cache_metadata *cmd)
309 {
310         int r;
311         size_t metadata_len;
312
313         r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
314         if (r < 0)
315                 return r;
316
317         return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
318                                metadata_len);
319 }
320
321 static void __copy_sm_root(struct dm_cache_metadata *cmd,
322                            struct cache_disk_superblock *disk_super)
323 {
324         memcpy(&disk_super->metadata_space_map_root,
325                &cmd->metadata_space_map_root,
326                sizeof(cmd->metadata_space_map_root));
327 }
328
329 static bool separate_dirty_bits(struct dm_cache_metadata *cmd)
330 {
331         return cmd->version >= 2;
332 }
333
334 static int __write_initial_superblock(struct dm_cache_metadata *cmd)
335 {
336         int r;
337         struct dm_block *sblock;
338         struct cache_disk_superblock *disk_super;
339         sector_t bdev_size = bdev_nr_sectors(cmd->bdev);
340
341         /* FIXME: see if we can lose the max sectors limit */
342         if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
343                 bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
344
345         r = dm_tm_pre_commit(cmd->tm);
346         if (r < 0)
347                 return r;
348
349         /*
350          * dm_sm_copy_root() can fail.  So we need to do it before we start
351          * updating the superblock.
352          */
353         r = __save_sm_root(cmd);
354         if (r)
355                 return r;
356
357         r = superblock_lock_zero(cmd, &sblock);
358         if (r)
359                 return r;
360
361         disk_super = dm_block_data(sblock);
362         disk_super->flags = 0;
363         memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
364         disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
365         disk_super->version = cpu_to_le32(cmd->version);
366         memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
367         memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
368         disk_super->policy_hint_size = cpu_to_le32(0);
369
370         __copy_sm_root(cmd, disk_super);
371
372         disk_super->mapping_root = cpu_to_le64(cmd->root);
373         disk_super->hint_root = cpu_to_le64(cmd->hint_root);
374         disk_super->discard_root = cpu_to_le64(cmd->discard_root);
375         disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
376         disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
377         disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
378         disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
379         disk_super->cache_blocks = cpu_to_le32(0);
380
381         disk_super->read_hits = cpu_to_le32(0);
382         disk_super->read_misses = cpu_to_le32(0);
383         disk_super->write_hits = cpu_to_le32(0);
384         disk_super->write_misses = cpu_to_le32(0);
385
386         if (separate_dirty_bits(cmd))
387                 disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
388
389         return dm_tm_commit(cmd->tm, sblock);
390 }
391
392 static int __format_metadata(struct dm_cache_metadata *cmd)
393 {
394         int r;
395
396         r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
397                                  &cmd->tm, &cmd->metadata_sm);
398         if (r < 0) {
399                 DMERR("tm_create_with_sm failed");
400                 return r;
401         }
402
403         __setup_mapping_info(cmd);
404
405         r = dm_array_empty(&cmd->info, &cmd->root);
406         if (r < 0)
407                 goto bad;
408
409         if (separate_dirty_bits(cmd)) {
410                 dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
411                 r = dm_bitset_empty(&cmd->dirty_info, &cmd->dirty_root);
412                 if (r < 0)
413                         goto bad;
414         }
415
416         dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
417         r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
418         if (r < 0)
419                 goto bad;
420
421         cmd->discard_block_size = 0;
422         cmd->discard_nr_blocks = 0;
423
424         r = __write_initial_superblock(cmd);
425         if (r)
426                 goto bad;
427
428         cmd->clean_when_opened = true;
429         return 0;
430
431 bad:
432         dm_tm_destroy(cmd->tm);
433         dm_sm_destroy(cmd->metadata_sm);
434
435         return r;
436 }
437
438 static int __check_incompat_features(struct cache_disk_superblock *disk_super,
439                                      struct dm_cache_metadata *cmd)
440 {
441         uint32_t incompat_flags, features;
442
443         incompat_flags = le32_to_cpu(disk_super->incompat_flags);
444         features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
445         if (features) {
446                 DMERR("could not access metadata due to unsupported optional features (%lx).",
447                       (unsigned long)features);
448                 return -EINVAL;
449         }
450
451         /*
452          * Check for read-only metadata to skip the following RDWR checks.
453          */
454         if (bdev_read_only(cmd->bdev))
455                 return 0;
456
457         features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
458         if (features) {
459                 DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
460                       (unsigned long)features);
461                 return -EINVAL;
462         }
463
464         return 0;
465 }
466
467 static int __open_metadata(struct dm_cache_metadata *cmd)
468 {
469         int r;
470         struct dm_block *sblock;
471         struct cache_disk_superblock *disk_super;
472         unsigned long sb_flags;
473
474         r = superblock_read_lock(cmd, &sblock);
475         if (r < 0) {
476                 DMERR("couldn't read lock superblock");
477                 return r;
478         }
479
480         disk_super = dm_block_data(sblock);
481
482         /* Verify the data block size hasn't changed */
483         if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
484                 DMERR("changing the data block size (from %u to %llu) is not supported",
485                       le32_to_cpu(disk_super->data_block_size),
486                       (unsigned long long)cmd->data_block_size);
487                 r = -EINVAL;
488                 goto bad;
489         }
490
491         r = __check_incompat_features(disk_super, cmd);
492         if (r < 0)
493                 goto bad;
494
495         r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
496                                disk_super->metadata_space_map_root,
497                                sizeof(disk_super->metadata_space_map_root),
498                                &cmd->tm, &cmd->metadata_sm);
499         if (r < 0) {
500                 DMERR("tm_open_with_sm failed");
501                 goto bad;
502         }
503
504         __setup_mapping_info(cmd);
505         dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
506         dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
507         sb_flags = le32_to_cpu(disk_super->flags);
508         cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
509         dm_bm_unlock(sblock);
510
511         return 0;
512
513 bad:
514         dm_bm_unlock(sblock);
515         return r;
516 }
517
518 static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
519                                      bool format_device)
520 {
521         int r;
522         bool unformatted = false;
523
524         r = __superblock_all_zeroes(cmd->bm, &unformatted);
525         if (r)
526                 return r;
527
528         if (unformatted)
529                 return format_device ? __format_metadata(cmd) : -EPERM;
530
531         return __open_metadata(cmd);
532 }
533
534 static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
535                                             bool may_format_device)
536 {
537         int r;
538
539         cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
540                                           CACHE_MAX_CONCURRENT_LOCKS);
541         if (IS_ERR(cmd->bm)) {
542                 DMERR("could not create block manager");
543                 r = PTR_ERR(cmd->bm);
544                 cmd->bm = NULL;
545                 return r;
546         }
547
548         r = __open_or_format_metadata(cmd, may_format_device);
549         if (r) {
550                 dm_block_manager_destroy(cmd->bm);
551                 cmd->bm = NULL;
552         }
553
554         return r;
555 }
556
557 static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd,
558                                               bool destroy_bm)
559 {
560         dm_sm_destroy(cmd->metadata_sm);
561         dm_tm_destroy(cmd->tm);
562         if (destroy_bm)
563                 dm_block_manager_destroy(cmd->bm);
564 }
565
566 typedef unsigned long (*flags_mutator)(unsigned long);
567
568 static void update_flags(struct cache_disk_superblock *disk_super,
569                          flags_mutator mutator)
570 {
571         uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
572
573         disk_super->flags = cpu_to_le32(sb_flags);
574 }
575
576 static unsigned long set_clean_shutdown(unsigned long flags)
577 {
578         set_bit(CLEAN_SHUTDOWN, &flags);
579         return flags;
580 }
581
582 static unsigned long clear_clean_shutdown(unsigned long flags)
583 {
584         clear_bit(CLEAN_SHUTDOWN, &flags);
585         return flags;
586 }
587
588 static void read_superblock_fields(struct dm_cache_metadata *cmd,
589                                    struct cache_disk_superblock *disk_super)
590 {
591         cmd->version = le32_to_cpu(disk_super->version);
592         cmd->flags = le32_to_cpu(disk_super->flags);
593         cmd->root = le64_to_cpu(disk_super->mapping_root);
594         cmd->hint_root = le64_to_cpu(disk_super->hint_root);
595         cmd->discard_root = le64_to_cpu(disk_super->discard_root);
596         cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
597         cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
598         cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
599         cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
600         strscpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
601         cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
602         cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
603         cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
604         cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
605
606         cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
607         cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
608         cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
609         cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
610
611         if (separate_dirty_bits(cmd))
612                 cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);
613
614         cmd->changed = false;
615 }
616
617 /*
618  * The mutator updates the superblock flags.
619  */
620 static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
621                                      flags_mutator mutator)
622 {
623         int r;
624         struct cache_disk_superblock *disk_super;
625         struct dm_block *sblock;
626
627         r = superblock_lock(cmd, &sblock);
628         if (r)
629                 return r;
630
631         disk_super = dm_block_data(sblock);
632         update_flags(disk_super, mutator);
633         read_superblock_fields(cmd, disk_super);
634         dm_bm_unlock(sblock);
635
636         return dm_bm_flush(cmd->bm);
637 }
638
639 static int __begin_transaction(struct dm_cache_metadata *cmd)
640 {
641         int r;
642         struct cache_disk_superblock *disk_super;
643         struct dm_block *sblock;
644
645         /*
646          * We re-read the superblock every time.  Shouldn't need to do this
647          * really.
648          */
649         r = superblock_read_lock(cmd, &sblock);
650         if (r)
651                 return r;
652
653         disk_super = dm_block_data(sblock);
654         read_superblock_fields(cmd, disk_super);
655         dm_bm_unlock(sblock);
656
657         return 0;
658 }
659
660 static int __commit_transaction(struct dm_cache_metadata *cmd,
661                                 flags_mutator mutator)
662 {
663         int r;
664         struct cache_disk_superblock *disk_super;
665         struct dm_block *sblock;
666
667         /*
668          * Make sure the cache_disk_superblock never exceeds a 512-byte sector.
669          */
670         BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
671
672         if (separate_dirty_bits(cmd)) {
673                 r = dm_bitset_flush(&cmd->dirty_info, cmd->dirty_root,
674                                     &cmd->dirty_root);
675                 if (r)
676                         return r;
677         }
678
679         r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
680                             &cmd->discard_root);
681         if (r)
682                 return r;
683
684         r = dm_tm_pre_commit(cmd->tm);
685         if (r < 0)
686                 return r;
687
688         r = __save_sm_root(cmd);
689         if (r)
690                 return r;
691
692         r = superblock_lock(cmd, &sblock);
693         if (r)
694                 return r;
695
696         disk_super = dm_block_data(sblock);
697
698         disk_super->flags = cpu_to_le32(cmd->flags);
699         if (mutator)
700                 update_flags(disk_super, mutator);
701
702         disk_super->mapping_root = cpu_to_le64(cmd->root);
703         if (separate_dirty_bits(cmd))
704                 disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
705         disk_super->hint_root = cpu_to_le64(cmd->hint_root);
706         disk_super->discard_root = cpu_to_le64(cmd->discard_root);
707         disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
708         disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
709         disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
710         strscpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
711         disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
712         disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
713         disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
714         disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
715
716         disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
717         disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
718         disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
719         disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
720         __copy_sm_root(cmd, disk_super);
721
722         return dm_tm_commit(cmd->tm, sblock);
723 }
724
725 /*----------------------------------------------------------------*/
726
727 /*
728  * The mappings are held in a dm-array that has 64-bit values stored in
729  * little-endian format.  The index is the cblock, the high 48 bits of the
730  * value are the oblock and the low 16 bits the flags.
731  */
732 #define FLAGS_MASK ((1 << 16) - 1)
733
734 static __le64 pack_value(dm_oblock_t block, unsigned int flags)
735 {
736         uint64_t value = from_oblock(block);
737
738         value <<= 16;
739         value = value | (flags & FLAGS_MASK);
740         return cpu_to_le64(value);
741 }
742
743 static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
744 {
745         uint64_t value = le64_to_cpu(value_le);
746         uint64_t b = value >> 16;
747
748         *block = to_oblock(b);
749         *flags = value & FLAGS_MASK;
750 }
751
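/*
 * Worked example of the packing described above (an illustrative sketch,
 * never called by the driver): oblock 5 with M_VALID | M_DIRTY packs to
 * (5 << 16) | 3, and unpack_value() recovers both halves again.
 */
static void __maybe_unused pack_value_example(void)
{
	dm_oblock_t oblock;
	unsigned int flags;
	__le64 packed = pack_value(to_oblock(5), M_VALID | M_DIRTY);

	unpack_value(packed, &oblock, &flags);
	/* Here from_oblock(oblock) == 5 and flags == (M_VALID | M_DIRTY). */
}
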
752 /*----------------------------------------------------------------*/
753
754 static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
755                                                sector_t data_block_size,
756                                                bool may_format_device,
757                                                size_t policy_hint_size,
758                                                unsigned int metadata_version)
759 {
760         int r;
761         struct dm_cache_metadata *cmd;
762
763         cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
764         if (!cmd) {
765                 DMERR("could not allocate metadata struct");
766                 return ERR_PTR(-ENOMEM);
767         }
768
769         cmd->version = metadata_version;
770         refcount_set(&cmd->ref_count, 1);
771         init_rwsem(&cmd->root_lock);
772         cmd->bdev = bdev;
773         cmd->data_block_size = data_block_size;
774         cmd->cache_blocks = 0;
775         cmd->policy_hint_size = policy_hint_size;
776         cmd->changed = true;
777         cmd->fail_io = false;
778
779         r = __create_persistent_data_objects(cmd, may_format_device);
780         if (r) {
781                 kfree(cmd);
782                 return ERR_PTR(r);
783         }
784
785         r = __begin_transaction_flags(cmd, clear_clean_shutdown);
786         if (r < 0) {
787                 dm_cache_metadata_close(cmd);
788                 return ERR_PTR(r);
789         }
790
791         return cmd;
792 }
793
794 /*
795  * We keep a little list of ref counted metadata objects to prevent two
796  * different target instances creating separate bufio instances.  This is
797  * an issue if a table is reloaded before the suspend.
798  */
799 static DEFINE_MUTEX(table_lock);
800 static LIST_HEAD(table);
801
802 static struct dm_cache_metadata *lookup(struct block_device *bdev)
803 {
804         struct dm_cache_metadata *cmd;
805
806         list_for_each_entry(cmd, &table, list)
807                 if (cmd->bdev == bdev) {
808                         refcount_inc(&cmd->ref_count);
809                         return cmd;
810                 }
811
812         return NULL;
813 }
814
815 static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
816                                                 sector_t data_block_size,
817                                                 bool may_format_device,
818                                                 size_t policy_hint_size,
819                                                 unsigned int metadata_version)
820 {
821         struct dm_cache_metadata *cmd, *cmd2;
822
823         mutex_lock(&table_lock);
824         cmd = lookup(bdev);
825         mutex_unlock(&table_lock);
826
827         if (cmd)
828                 return cmd;
829
830         cmd = metadata_open(bdev, data_block_size, may_format_device,
831                             policy_hint_size, metadata_version);
832         if (!IS_ERR(cmd)) {
833                 mutex_lock(&table_lock);
834                 cmd2 = lookup(bdev);
835                 if (cmd2) {
836                         mutex_unlock(&table_lock);
837                         __destroy_persistent_data_objects(cmd, true);
838                         kfree(cmd);
839                         return cmd2;
840                 }
841                 list_add(&cmd->list, &table);
842                 mutex_unlock(&table_lock);
843         }
844
845         return cmd;
846 }
847
848 static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
849 {
850         if (cmd->data_block_size != data_block_size) {
851                 DMERR("data_block_size (%llu) different from that in metadata (%llu)",
852                       (unsigned long long) data_block_size,
853                       (unsigned long long) cmd->data_block_size);
854                 return false;
855         }
856
857         return true;
858 }
859
860 struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
861                                                  sector_t data_block_size,
862                                                  bool may_format_device,
863                                                  size_t policy_hint_size,
864                                                  unsigned int metadata_version)
865 {
866         struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
867                                                        policy_hint_size, metadata_version);
868
869         if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
870                 dm_cache_metadata_close(cmd);
871                 return ERR_PTR(-EINVAL);
872         }
873
874         return cmd;
875 }
876
877 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
878 {
879         if (refcount_dec_and_test(&cmd->ref_count)) {
880                 mutex_lock(&table_lock);
881                 list_del(&cmd->list);
882                 mutex_unlock(&table_lock);
883
884                 if (!cmd->fail_io)
885                         __destroy_persistent_data_objects(cmd, true);
886                 kfree(cmd);
887         }
888 }
889
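/*
 * Sketch of the intended open/close pairing (a hypothetical caller, not
 * part of this file): dm_cache_metadata_open() either creates a metadata
 * object or takes a reference on an existing one for the same bdev, so
 * every successful open must be balanced by dm_cache_metadata_close().
 * The data block size, hint size and metadata version below are purely
 * illustrative values.
 */
static int __maybe_unused metadata_open_close_example(struct block_device *bdev)
{
	struct dm_cache_metadata *cmd;

	cmd = dm_cache_metadata_open(bdev, 128, true, sizeof(__le32), 2);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* ... metadata operations go here ... */

	dm_cache_metadata_close(cmd);
	return 0;
}
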
890 /*
891  * Checks that the given cache block is either unmapped or clean.
892  */
893 static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b,
894                                       bool *result)
895 {
896         int r;
897         __le64 value;
898         dm_oblock_t ob;
899         unsigned int flags;
900
901         r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
902         if (r)
903                 return r;
904
905         unpack_value(value, &ob, &flags);
906         *result = !((flags & M_VALID) && (flags & M_DIRTY));
907
908         return 0;
909 }
910
911 static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd,
912                                            dm_cblock_t begin, dm_cblock_t end,
913                                            bool *result)
914 {
915         int r;
916         *result = true;
917
918         while (begin != end) {
919                 r = block_clean_combined_dirty(cmd, begin, result);
920                 if (r) {
921                         DMERR("block_clean_combined_dirty failed");
922                         return r;
923                 }
924
925                 if (!*result) {
926                         DMERR("cache block %llu is dirty",
927                               (unsigned long long) from_cblock(begin));
928                         return 0;
929                 }
930
931                 begin = to_cblock(from_cblock(begin) + 1);
932         }
933
934         return 0;
935 }
936
937 static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
938                                            dm_cblock_t begin, dm_cblock_t end,
939                                            bool *result)
940 {
941         int r;
942         bool dirty_flag;
943         *result = true;
944
945         if (from_cblock(cmd->cache_blocks) == 0)
946                 /* Nothing to do */
947                 return 0;
948
949         r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
950                                    from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
951         if (r) {
952                 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
953                 return r;
954         }
955
956         r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));
957         if (r) {
958                 DMERR("%s: dm_bitset_cursor_skip for dirty failed", __func__);
959                 dm_bitset_cursor_end(&cmd->dirty_cursor);
960                 return r;
961         }
962
963         while (begin != end) {
964                 /*
965                  * We assume that unmapped blocks have their dirty bit
966                  * cleared.
967                  */
968                 dirty_flag = dm_bitset_cursor_get_value(&cmd->dirty_cursor);
969                 if (dirty_flag) {
970                         DMERR("%s: cache block %llu is dirty", __func__,
971                               (unsigned long long) from_cblock(begin));
972                         dm_bitset_cursor_end(&cmd->dirty_cursor);
973                         *result = false;
974                         return 0;
975                 }
976
977                 begin = to_cblock(from_cblock(begin) + 1);
978                 if (begin == end)
979                         break;
980
981                 r = dm_bitset_cursor_next(&cmd->dirty_cursor);
982                 if (r) {
983                         DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
984                         dm_bitset_cursor_end(&cmd->dirty_cursor);
985                         return r;
986                 }
987         }
988
989         dm_bitset_cursor_end(&cmd->dirty_cursor);
990
991         return 0;
992 }
993
994 static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
995                                         dm_cblock_t begin, dm_cblock_t end,
996                                         bool *result)
997 {
998         if (separate_dirty_bits(cmd))
999                 return blocks_are_clean_separate_dirty(cmd, begin, end, result);
1000         else
1001                 return blocks_are_clean_combined_dirty(cmd, begin, end, result);
1002 }
1003
1004 static bool cmd_write_lock(struct dm_cache_metadata *cmd)
1005 {
1006         down_write(&cmd->root_lock);
1007         if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
1008                 up_write(&cmd->root_lock);
1009                 return false;
1010         }
1011         return true;
1012 }
1013
1014 #define WRITE_LOCK(cmd)                         \
1015         do {                                    \
1016                 if (!cmd_write_lock((cmd)))     \
1017                         return -EINVAL;         \
1018         } while (0)
1019
1020 #define WRITE_LOCK_VOID(cmd)                    \
1021         do {                                    \
1022                 if (!cmd_write_lock((cmd)))     \
1023                         return;                 \
1024         } while (0)
1025
1026 #define WRITE_UNLOCK(cmd) \
1027         up_write(&(cmd)->root_lock)
1028
1029 static bool cmd_read_lock(struct dm_cache_metadata *cmd)
1030 {
1031         down_read(&cmd->root_lock);
1032         if (cmd->fail_io) {
1033                 up_read(&cmd->root_lock);
1034                 return false;
1035         }
1036         return true;
1037 }
1038
1039 #define READ_LOCK(cmd)                          \
1040         do {                                    \
1041                 if (!cmd_read_lock((cmd)))      \
1042                         return -EINVAL;         \
1043         } while (0)
1044
1045 #define READ_LOCK_VOID(cmd)                     \
1046         do {                                    \
1047                 if (!cmd_read_lock((cmd)))      \
1048                         return;                 \
1049         } while (0)
1050
1051 #define READ_UNLOCK(cmd) \
1052         up_read(&(cmd)->root_lock)
1053
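/*
 * Sketch of how the macros above are used by the accessors that follow
 * (the helper itself is hypothetical): READ_LOCK()/WRITE_LOCK() return
 * -EINVAL from the *calling* function when fail_io is set (or, for writes,
 * when the block manager has gone read-only), so every exit path taken
 * after them must pass through the matching UNLOCK.
 */
static int __maybe_unused example_read_accessor(struct dm_cache_metadata *cmd,
						dm_cblock_t *result)
{
	READ_LOCK(cmd);			/* may return -EINVAL before we get here... */
	*result = cmd->cache_blocks;	/* ...otherwise root_lock is held */
	READ_UNLOCK(cmd);

	return 0;
}
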
1054 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
1055 {
1056         int r;
1057         bool clean;
1058         __le64 null_mapping = pack_value(0, 0);
1059
1060         WRITE_LOCK(cmd);
1061         __dm_bless_for_disk(&null_mapping);
1062
1063         if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
1064                 r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
1065                 if (r) {
1066                         __dm_unbless_for_disk(&null_mapping);
1067                         goto out;
1068                 }
1069
1070                 if (!clean) {
1071                         DMERR("unable to shrink cache due to dirty blocks");
1072                         r = -EINVAL;
1073                         __dm_unbless_for_disk(&null_mapping);
1074                         goto out;
1075                 }
1076         }
1077
1078         r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
1079                             from_cblock(new_cache_size),
1080                             &null_mapping, &cmd->root);
1081         if (r)
1082                 goto out;
1083
1084         if (separate_dirty_bits(cmd)) {
1085                 r = dm_bitset_resize(&cmd->dirty_info, cmd->dirty_root,
1086                                      from_cblock(cmd->cache_blocks), from_cblock(new_cache_size),
1087                                      false, &cmd->dirty_root);
1088                 if (r)
1089                         goto out;
1090         }
1091
1092         cmd->cache_blocks = new_cache_size;
1093         cmd->changed = true;
1094
1095 out:
1096         WRITE_UNLOCK(cmd);
1097
1098         return r;
1099 }
1100
1101 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
1102                                    sector_t discard_block_size,
1103                                    dm_dblock_t new_nr_entries)
1104 {
1105         int r;
1106
1107         WRITE_LOCK(cmd);
1108         r = dm_bitset_resize(&cmd->discard_info,
1109                              cmd->discard_root,
1110                              from_dblock(cmd->discard_nr_blocks),
1111                              from_dblock(new_nr_entries),
1112                              false, &cmd->discard_root);
1113         if (!r) {
1114                 cmd->discard_block_size = discard_block_size;
1115                 cmd->discard_nr_blocks = new_nr_entries;
1116         }
1117
1118         cmd->changed = true;
1119         WRITE_UNLOCK(cmd);
1120
1121         return r;
1122 }
1123
1124 static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1125 {
1126         return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
1127                                  from_dblock(b), &cmd->discard_root);
1128 }
1129
1130 static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1131 {
1132         return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
1133                                    from_dblock(b), &cmd->discard_root);
1134 }
1135
1136 static int __discard(struct dm_cache_metadata *cmd,
1137                      dm_dblock_t dblock, bool discard)
1138 {
1139         int r;
1140
1141         r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
1142         if (r)
1143                 return r;
1144
1145         cmd->changed = true;
1146         return 0;
1147 }
1148
1149 int dm_cache_set_discard(struct dm_cache_metadata *cmd,
1150                          dm_dblock_t dblock, bool discard)
1151 {
1152         int r;
1153
1154         WRITE_LOCK(cmd);
1155         r = __discard(cmd, dblock, discard);
1156         WRITE_UNLOCK(cmd);
1157
1158         return r;
1159 }
1160
1161 static int __load_discards(struct dm_cache_metadata *cmd,
1162                            load_discard_fn fn, void *context)
1163 {
1164         int r = 0;
1165         uint32_t b;
1166         struct dm_bitset_cursor c;
1167
1168         if (from_dblock(cmd->discard_nr_blocks) == 0)
1169                 /* nothing to do */
1170                 return 0;
1171
1172         if (cmd->clean_when_opened) {
1173                 r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root, &cmd->discard_root);
1174                 if (r)
1175                         return r;
1176
1177                 r = dm_bitset_cursor_begin(&cmd->discard_info, cmd->discard_root,
1178                                            from_dblock(cmd->discard_nr_blocks), &c);
1179                 if (r)
1180                         return r;
1181
1182                 for (b = 0; ; b++) {
1183                         r = fn(context, cmd->discard_block_size, to_dblock(b),
1184                                dm_bitset_cursor_get_value(&c));
1185                         if (r)
1186                                 break;
1187
1188                         if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
1189                                 break;
1190
1191                         r = dm_bitset_cursor_next(&c);
1192                         if (r)
1193                                 break;
1194                 }
1195
1196                 dm_bitset_cursor_end(&c);
1197
1198         } else {
1199                 for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1200                         r = fn(context, cmd->discard_block_size, to_dblock(b), false);
1201                         if (r)
1202                                 return r;
1203                 }
1204         }
1205
1206         return r;
1207 }
1208
1209 int dm_cache_load_discards(struct dm_cache_metadata *cmd,
1210                            load_discard_fn fn, void *context)
1211 {
1212         int r;
1213
1214         READ_LOCK(cmd);
1215         r = __load_discards(cmd, fn, context);
1216         READ_UNLOCK(cmd);
1217
1218         return r;
1219 }
1220
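/*
 * Sketch of a load_discard_fn callback (hypothetical; the parameter order
 * follows the fn() call in __load_discards() above).  It runs once per
 * discard block, under the metadata read lock taken by
 * dm_cache_load_discards().
 */
static int __maybe_unused example_load_discard(void *context, sector_t discard_block_size,
					       dm_dblock_t dblock, bool discarded)
{
	if (discarded)
		DMDEBUG("discard block %llu (of %llu sectors) is discarded",
			(unsigned long long) from_dblock(dblock),
			(unsigned long long) discard_block_size);

	return 0;
}
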
1221 static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1222 {
1223         int r;
1224         __le64 value = pack_value(0, 0);
1225
1226         __dm_bless_for_disk(&value);
1227         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1228                                &value, &cmd->root);
1229         if (r)
1230                 return r;
1231
1232         cmd->changed = true;
1233         return 0;
1234 }
1235
1236 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1237 {
1238         int r;
1239
1240         WRITE_LOCK(cmd);
1241         r = __remove(cmd, cblock);
1242         WRITE_UNLOCK(cmd);
1243
1244         return r;
1245 }
1246
1247 static int __insert(struct dm_cache_metadata *cmd,
1248                     dm_cblock_t cblock, dm_oblock_t oblock)
1249 {
1250         int r;
1251         __le64 value = pack_value(oblock, M_VALID);
1252
1253         __dm_bless_for_disk(&value);
1254
1255         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1256                                &value, &cmd->root);
1257         if (r)
1258                 return r;
1259
1260         cmd->changed = true;
1261         return 0;
1262 }
1263
1264 int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
1265                             dm_cblock_t cblock, dm_oblock_t oblock)
1266 {
1267         int r;
1268
1269         WRITE_LOCK(cmd);
1270         r = __insert(cmd, cblock, oblock);
1271         WRITE_UNLOCK(cmd);
1272
1273         return r;
1274 }
1275
1276 static bool policy_unchanged(struct dm_cache_metadata *cmd,
1277                              struct dm_cache_policy *policy)
1278 {
1279         const char *policy_name = dm_cache_policy_get_name(policy);
1280         const unsigned int *policy_version = dm_cache_policy_get_version(policy);
1281         size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
1282
1283         /*
1284          * Ensure policy names match.
1285          */
1286         if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
1287                 return false;
1288
1289         /*
1290          * Ensure policy major versions match.
1291          */
1292         if (cmd->policy_version[0] != policy_version[0])
1293                 return false;
1294
1295         /*
1296          * Ensure policy hint sizes match.
1297          */
1298         if (cmd->policy_hint_size != policy_hint_size)
1299                 return false;
1300
1301         return true;
1302 }
1303
1304 static bool hints_array_initialized(struct dm_cache_metadata *cmd)
1305 {
1306         return cmd->hint_root && cmd->policy_hint_size;
1307 }
1308
1309 static bool hints_array_available(struct dm_cache_metadata *cmd,
1310                                   struct dm_cache_policy *policy)
1311 {
1312         return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
1313                 hints_array_initialized(cmd);
1314 }
1315
1316 static int __load_mapping_v1(struct dm_cache_metadata *cmd,
1317                              uint64_t cb, bool hints_valid,
1318                              struct dm_array_cursor *mapping_cursor,
1319                              struct dm_array_cursor *hint_cursor,
1320                              load_mapping_fn fn, void *context)
1321 {
1322         int r = 0;
1323
1324         __le64 mapping;
1325         __le32 hint = 0;
1326
1327         __le64 *mapping_value_le;
1328         __le32 *hint_value_le;
1329
1330         dm_oblock_t oblock;
1331         unsigned int flags;
1332         bool dirty = true;
1333
1334         dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1335         memcpy(&mapping, mapping_value_le, sizeof(mapping));
1336         unpack_value(mapping, &oblock, &flags);
1337
1338         if (flags & M_VALID) {
1339                 if (hints_valid) {
1340                         dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1341                         memcpy(&hint, hint_value_le, sizeof(hint));
1342                 }
1343                 if (cmd->clean_when_opened)
1344                         dirty = flags & M_DIRTY;
1345
1346                 r = fn(context, oblock, to_cblock(cb), dirty,
1347                        le32_to_cpu(hint), hints_valid);
1348                 if (r) {
1349                         DMERR("policy couldn't load cache block %llu",
1350                               (unsigned long long) from_cblock(to_cblock(cb)));
1351                 }
1352         }
1353
1354         return r;
1355 }
1356
1357 static int __load_mapping_v2(struct dm_cache_metadata *cmd,
1358                              uint64_t cb, bool hints_valid,
1359                              struct dm_array_cursor *mapping_cursor,
1360                              struct dm_array_cursor *hint_cursor,
1361                              struct dm_bitset_cursor *dirty_cursor,
1362                              load_mapping_fn fn, void *context)
1363 {
1364         int r = 0;
1365
1366         __le64 mapping;
1367         __le32 hint = 0;
1368
1369         __le64 *mapping_value_le;
1370         __le32 *hint_value_le;
1371
1372         dm_oblock_t oblock;
1373         unsigned int flags;
1374         bool dirty = true;
1375
1376         dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1377         memcpy(&mapping, mapping_value_le, sizeof(mapping));
1378         unpack_value(mapping, &oblock, &flags);
1379
1380         if (flags & M_VALID) {
1381                 if (hints_valid) {
1382                         dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1383                         memcpy(&hint, hint_value_le, sizeof(hint));
1384                 }
1385                 if (cmd->clean_when_opened)
1386                         dirty = dm_bitset_cursor_get_value(dirty_cursor);
1387
1388                 r = fn(context, oblock, to_cblock(cb), dirty,
1389                        le32_to_cpu(hint), hints_valid);
1390                 if (r) {
1391                         DMERR("policy couldn't load cache block %llu",
1392                               (unsigned long long) from_cblock(to_cblock(cb)));
1393                 }
1394         }
1395
1396         return r;
1397 }
1398
1399 static int __load_mappings(struct dm_cache_metadata *cmd,
1400                            struct dm_cache_policy *policy,
1401                            load_mapping_fn fn, void *context)
1402 {
1403         int r;
1404         uint64_t cb;
1405
1406         bool hints_valid = hints_array_available(cmd, policy);
1407
1408         if (from_cblock(cmd->cache_blocks) == 0)
1409                 /* Nothing to do */
1410                 return 0;
1411
1412         r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
1413         if (r)
1414                 return r;
1415
1416         if (hints_valid) {
1417                 r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
1418                 if (r) {
1419                         dm_array_cursor_end(&cmd->mapping_cursor);
1420                         return r;
1421                 }
1422         }
1423
1424         if (separate_dirty_bits(cmd)) {
1425                 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
1426                                            from_cblock(cmd->cache_blocks),
1427                                            &cmd->dirty_cursor);
1428                 if (r) {
1429                         dm_array_cursor_end(&cmd->hint_cursor);
1430                         dm_array_cursor_end(&cmd->mapping_cursor);
1431                         return r;
1432                 }
1433         }
1434
1435         for (cb = 0; ; cb++) {
1436                 if (separate_dirty_bits(cmd))
1437                         r = __load_mapping_v2(cmd, cb, hints_valid,
1438                                               &cmd->mapping_cursor,
1439                                               &cmd->hint_cursor,
1440                                               &cmd->dirty_cursor,
1441                                               fn, context);
1442                 else
1443                         r = __load_mapping_v1(cmd, cb, hints_valid,
1444                                               &cmd->mapping_cursor, &cmd->hint_cursor,
1445                                               fn, context);
1446                 if (r)
1447                         goto out;
1448
1449                 /*
1450                  * We need to break out before we move the cursors.
1451                  */
1452                 if (cb >= (from_cblock(cmd->cache_blocks) - 1))
1453                         break;
1454
1455                 r = dm_array_cursor_next(&cmd->mapping_cursor);
1456                 if (r) {
1457                         DMERR("dm_array_cursor_next for mapping failed");
1458                         goto out;
1459                 }
1460
1461                 if (hints_valid) {
1462                         r = dm_array_cursor_next(&cmd->hint_cursor);
1463                         if (r) {
1464                                 dm_array_cursor_end(&cmd->hint_cursor);
1465                                 hints_valid = false;
1466                         }
1467                 }
1468
1469                 if (separate_dirty_bits(cmd)) {
1470                         r = dm_bitset_cursor_next(&cmd->dirty_cursor);
1471                         if (r) {
1472                                 DMERR("dm_bitset_cursor_next for dirty failed");
1473                                 goto out;
1474                         }
1475                 }
1476         }
1477 out:
1478         dm_array_cursor_end(&cmd->mapping_cursor);
1479         if (hints_valid)
1480                 dm_array_cursor_end(&cmd->hint_cursor);
1481
1482         if (separate_dirty_bits(cmd))
1483                 dm_bitset_cursor_end(&cmd->dirty_cursor);
1484
1485         return r;
1486 }
1487
1488 int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
1489                            struct dm_cache_policy *policy,
1490                            load_mapping_fn fn, void *context)
1491 {
1492         int r;
1493
1494         READ_LOCK(cmd);
1495         r = __load_mappings(cmd, policy, fn, context);
1496         READ_UNLOCK(cmd);
1497
1498         return r;
1499 }
1500
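/*
 * Sketch of a load_mapping_fn callback (hypothetical; the parameter order
 * follows the fn() calls in __load_mapping_v1/v2() above).  The policy is
 * called once per valid mapping with its dirty state and, when the hints
 * array is usable, the 32-bit hint saved by the previous cache instance.
 */
static int __maybe_unused example_load_mapping(void *context, dm_oblock_t oblock,
					       dm_cblock_t cblock, bool dirty,
					       uint32_t hint, bool hint_valid)
{
	DMDEBUG("cblock %llu -> oblock %llu, %s, hint %u%s",
		(unsigned long long) from_cblock(cblock),
		(unsigned long long) from_oblock(oblock),
		dirty ? "dirty" : "clean",
		hint, hint_valid ? "" : " (invalid)");

	return 0;
}
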
1501 int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
1502 {
1503         int r;
1504
1505         READ_LOCK(cmd);
1506         r = cmd->changed;
1507         READ_UNLOCK(cmd);
1508
1509         return r;
1510 }
1511
1512 static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
1513 {
1514         int r;
1515         unsigned int flags;
1516         dm_oblock_t oblock;
1517         __le64 value;
1518
1519         r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
1520         if (r)
1521                 return r;
1522
1523         unpack_value(value, &oblock, &flags);
1524
1525         if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
1526                 /* nothing to be done */
1527                 return 0;
1528
1529         value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
1530         __dm_bless_for_disk(&value);
1531
1532         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1533                                &value, &cmd->root);
1534         if (r)
1535                 return r;
1536
1537         cmd->changed = true;
1538         return 0;
1539
1540 }
1541
1542 static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
1543 {
1544         int r;
1545         unsigned int i;
1546
1547         for (i = 0; i < nr_bits; i++) {
1548                 r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
1549                 if (r)
1550                         return r;
1551         }
1552
1553         return 0;
1554 }
1555
1556 static int is_dirty_callback(uint32_t index, bool *value, void *context)
1557 {
1558         unsigned long *bits = context;
1559         *value = test_bit(index, bits);
1560         return 0;
1561 }
1562
1563 static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
1564 {
1565         int r = 0;
1566
1567         /* nr_bits is really just a sanity check */
1568         if (nr_bits != from_cblock(cmd->cache_blocks)) {
1569                 DMERR("dirty bitset is wrong size");
1570                 return -EINVAL;
1571         }
1572
1573         r = dm_bitset_del(&cmd->dirty_info, cmd->dirty_root);
1574         if (r)
1575                 return r;
1576
1577         cmd->changed = true;
1578         return dm_bitset_new(&cmd->dirty_info, &cmd->dirty_root, nr_bits, is_dirty_callback, bits);
1579 }
1580
1581 int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
1582                             unsigned int nr_bits,
1583                             unsigned long *bits)
1584 {
1585         int r;
1586
1587         WRITE_LOCK(cmd);
1588         if (separate_dirty_bits(cmd))
1589                 r = __set_dirty_bits_v2(cmd, nr_bits, bits);
1590         else
1591                 r = __set_dirty_bits_v1(cmd, nr_bits, bits);
1592         WRITE_UNLOCK(cmd);
1593
1594         return r;
1595 }
1596
1597 void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
1598                                  struct dm_cache_statistics *stats)
1599 {
1600         READ_LOCK_VOID(cmd);
1601         *stats = cmd->stats;
1602         READ_UNLOCK(cmd);
1603 }
1604
1605 void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
1606                                  struct dm_cache_statistics *stats)
1607 {
1608         WRITE_LOCK_VOID(cmd);
1609         cmd->stats = *stats;
1610         WRITE_UNLOCK(cmd);
1611 }
1612
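/*
 * Added note (not in the original source): commit the current transaction
 * and immediately begin a new one.  The clean_shutdown argument selects
 * whether the set_clean_shutdown or clear_clean_shutdown mutator is applied
 * to the superblock flags as part of the commit.
 */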
1613 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
1614 {
1615         int r = -EINVAL;
1616         flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
1617                                  clear_clean_shutdown);
1618
1619         WRITE_LOCK(cmd);
1620         if (cmd->fail_io)
1621                 goto out;
1622
1623         r = __commit_transaction(cmd, mutator);
1624         if (r)
1625                 goto out;
1626
1627         r = __begin_transaction(cmd);
1628 out:
1629         WRITE_UNLOCK(cmd);
1630         return r;
1631 }
1632
1633 int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
1634                                            dm_block_t *result)
1635 {
1636         int r = -EINVAL;
1637
1638         READ_LOCK(cmd);
1639         if (!cmd->fail_io)
1640                 r = dm_sm_get_nr_free(cmd->metadata_sm, result);
1641         READ_UNLOCK(cmd);
1642
1643         return r;
1644 }
1645
1646 int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
1647                                    dm_block_t *result)
1648 {
1649         int r = -EINVAL;
1650
1651         READ_LOCK(cmd);
1652         if (!cmd->fail_io)
1653                 r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
1654         READ_UNLOCK(cmd);
1655
1656         return r;
1657 }
1658
1659 /*----------------------------------------------------------------*/
1660
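/*
 * Added note (not in the original source): callback for dm_array_new().
 * Returns the little-endian policy hint for the given cache block so the
 * hint array can be rebuilt in one pass.
 */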
1661 static int get_hint(uint32_t index, void *value_le, void *context)
1662 {
1663         uint32_t value;
1664         struct dm_cache_policy *policy = context;
1665
1666         value = policy_get_hint(policy, to_cblock(index));
1667         *((__le32 *) value_le) = cpu_to_le32(value);
1668
1669         return 0;
1670 }
1671
1672 /*
1673  * It's quicker to always delete the hint array and recreate it with
1674  * dm_array_new().
1675  */
1676 static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1677 {
1678         int r;
1679         size_t hint_size;
1680         const char *policy_name = dm_cache_policy_get_name(policy);
1681         const unsigned int *policy_version = dm_cache_policy_get_version(policy);
1682
1683         if (!policy_name[0] ||
1684             (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
1685                 return -EINVAL;
1686
1687         strscpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
1688         memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
1689
1690         hint_size = dm_cache_policy_get_hint_size(policy);
1691         if (!hint_size)
1692                 return 0; /* short-circuit hints initialization */
1693         cmd->policy_hint_size = hint_size;
1694
1695         if (cmd->hint_root) {
1696                 r = dm_array_del(&cmd->hint_info, cmd->hint_root);
1697                 if (r)
1698                         return r;
1699         }
1700
1701         return dm_array_new(&cmd->hint_info, &cmd->hint_root,
1702                             from_cblock(cmd->cache_blocks),
1703                             get_hint, policy);
1704 }
1705
1706 int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1707 {
1708         int r;
1709
1710         WRITE_LOCK(cmd);
1711         r = write_hints(cmd, policy);
1712         WRITE_UNLOCK(cmd);
1713
1714         return r;
1715 }
1716
1717 int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
1718 {
1719         int r;
1720
1721         READ_LOCK(cmd);
1722         r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
1723         READ_UNLOCK(cmd);
1724
1725         return r;
1726 }
1727
1728 void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
1729 {
1730         WRITE_LOCK_VOID(cmd);
1731         dm_bm_set_read_only(cmd->bm);
1732         WRITE_UNLOCK(cmd);
1733 }
1734
1735 void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
1736 {
1737         WRITE_LOCK_VOID(cmd);
1738         dm_bm_set_read_write(cmd->bm);
1739         WRITE_UNLOCK(cmd);
1740 }
1741
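/*
 * Added note (not in the original source): set NEEDS_CHECK in the in-core
 * flags and mirror it into the superblock's flags field so the condition is
 * recorded on disk.
 */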
1742 int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
1743 {
1744         int r;
1745         struct dm_block *sblock;
1746         struct cache_disk_superblock *disk_super;
1747
1748         WRITE_LOCK(cmd);
1749         set_bit(NEEDS_CHECK, &cmd->flags);
1750
1751         r = superblock_lock(cmd, &sblock);
1752         if (r) {
1753                 DMERR("couldn't read superblock");
1754                 goto out;
1755         }
1756
1757         disk_super = dm_block_data(sblock);
1758         disk_super->flags = cpu_to_le32(cmd->flags);
1759
1760         dm_bm_unlock(sblock);
1761
1762 out:
1763         WRITE_UNLOCK(cmd);
1764         return r;
1765 }
1766
1767 int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
1768 {
1769         READ_LOCK(cmd);
1770         *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
1771         READ_UNLOCK(cmd);
1772
1773         return 0;
1774 }
1775
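/*
 * Added note (not in the original source): abandon the current transaction.
 * The in-core persistent-data objects are torn down and re-opened on a fresh
 * block manager, reverting to the last committed metadata; on failure the
 * device is switched to fail_io mode.
 */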
1776 int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
1777 {
1778         int r = -EINVAL;
1779         struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
1780
1781         /* fail_io is double-checked with cmd->root_lock held below */
1782         if (unlikely(cmd->fail_io))
1783                 return r;
1784
1785         /*
1786          * The replacement block manager (new_bm) is created, and old_bm destroyed, outside of
1787          * cmd->root_lock to avoid the ABBA deadlock that would otherwise result (due to the
1788          * life-cycle of the shrinker associated with the block manager's bufio client vs cmd->root_lock).
1789          * - must take shrinker_mutex without holding cmd->root_lock
1790          */
1791         new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
1792                                          CACHE_MAX_CONCURRENT_LOCKS);
1793
1794         WRITE_LOCK(cmd);
1795         if (cmd->fail_io) {
1796                 WRITE_UNLOCK(cmd);
1797                 goto out;
1798         }
1799
1800         __destroy_persistent_data_objects(cmd, false);
1801         old_bm = cmd->bm;
1802         if (IS_ERR(new_bm)) {
1803                 DMERR("could not create block manager during abort");
1804                 cmd->bm = NULL;
1805                 r = PTR_ERR(new_bm);
1806                 goto out_unlock;
1807         }
1808
1809         cmd->bm = new_bm;
1810         r = __open_or_format_metadata(cmd, false);
1811         if (r) {
1812                 cmd->bm = NULL;
1813                 goto out_unlock;
1814         }
1815         new_bm = NULL;
1816 out_unlock:
1817         if (r)
1818                 cmd->fail_io = true;
1819         WRITE_UNLOCK(cmd);
1820         dm_block_manager_destroy(old_bm);
1821 out:
1822         if (new_bm && !IS_ERR(new_bm))
1823                 dm_block_manager_destroy(new_bm);
1824
1825         return r;
1826 }