// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES	253 /* md-raid kernel limit */

/*
 * Minimum sectors of free reshape space per raid device
 */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)

/*
 * Minimum journal space 4 MiB in sectors.
 */
#define MIN_RAID456_JOURNAL_SPACE (4*2048)

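/*
 * Worked example of the two minima above, assuming 512-byte sectors:
 * MIN_FREE_RESHAPE_SPACE = to_sector(4*4096) = 16384 bytes / 512 = 32
 * sectors per device, and MIN_RAID456_JOURNAL_SPACE = 4*2048 = 8192
 * sectors = 4 MiB.
 */
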
static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10	/* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity. The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Bits for establishing rs->ctr_flags
 *
 * 1 = no flag value
 * 2 = flag with value
 */
#define __CTR_FLAG_SYNC			0  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_NOSYNC		1  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_REBUILD		2  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_DAEMON_SLEEP		3  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MIN_RECOVERY_RATE	4  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_RECOVERY_RATE	5  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_WRITE_BEHIND	6  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_WRITE_MOSTLY		7  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_STRIPE_CACHE		8  /* 2 */ /* Only with raid4/5/6! */
#define __CTR_FLAG_REGION_SIZE		9  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_RAID10_COPIES	10 /* 2 */ /* Only with raid10 */
#define __CTR_FLAG_RAID10_FORMAT	11 /* 2 */ /* Only with raid10 */
/* New for v1.9.0 */
#define __CTR_FLAG_DELTA_DISKS		12 /* 2 */ /* Only with reshapable raid1/4/5/6/10! */
#define __CTR_FLAG_DATA_OFFSET		13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */

/* New for v1.10.0 */
#define __CTR_FLAG_JOURNAL_DEV		15 /* 2 */ /* Only with raid4/5/6 (journal device)! */

/* New for v1.11.1 */
#define __CTR_FLAG_JOURNAL_MODE		16 /* 2 */ /* Only with raid4/5/6 (journal mode)! */

/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC			(1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC			(1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD		(1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP		(1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE	(1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE	(1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND	(1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY		(1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE		(1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE		(1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES		(1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT		(1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS		(1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET		(1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS	(1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
#define CTR_FLAG_JOURNAL_DEV		(1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE		(1 << __CTR_FLAG_JOURNAL_MODE)

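/*
 * Usage note: the __CTR_FLAG_* values are bit numbers for use with the
 * test_bit()/set_bit() family on rs->ctr_flags, while the CTR_FLAG_*
 * values are the corresponding masks for plain bitwise tests, e.g.:
 *
 *	test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags);
 *	if (rs->ctr_flags & CTR_FLAG_NOSYNC) ...
 */
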
/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */

/* Define all sync flags */
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS	(CTR_FLAGS_ANY_SYNC | \
					 CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET | \
				  CTR_FLAG_JOURNAL_DEV | \
				  CTR_FLAG_JOURNAL_MODE)

/* Valid options definitions per raid level... */

/* "raid0" does only accept data offset */
#define RAID0_VALID_FLAGS	(CTR_FLAG_DATA_OFFSET)

/* "raid1" does not accept stripe cache or any raid10 options */
#define RAID1_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_RAID10_USE_NEAR_SETS)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */
#define RAID45_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)

#define RAID6_VALID_FLAGS	(CTR_FLAG_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)
/* ...valid options definitions per raid level */

/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED		0
#define RT_FLAG_RS_RESUMED		1
#define RT_FLAG_RS_BITMAP_LOADED	2
#define RT_FLAG_UPDATE_SBS		3
#define RT_FLAG_RESHAPE_RS		4
#define RT_FLAG_RS_SUSPENDED		5
#define RT_FLAG_RS_IN_SYNC		6
#define RT_FLAG_RS_RESYNCING		7
#define RT_FLAG_RS_GROW			8

/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)

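/*
 * Worked example: sizeof(uint64_t) * 8 = 64 bits per array element, so
 * DISKS_ARRAY_ELEMS = (253 + 63) / 8 / 8 = 4 words, i.e. 256 bits --
 * one bit per possible raid device with a little headroom.
 */
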
/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;
	unsigned long runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;

	sector_t array_sectors;
	sector_t dev_sectors;

	/* Optional raid4/5/6 journal device */
	struct journal_dev {
		struct dm_dev *dev;
		struct md_rdev rdev;
		int mode;
	} journal_dev;

	struct raid_dev dev[];
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT	0
#define ALGORITHM_RAID10_NEAR		1
#define ALGORITHM_RAID10_OFFSET		2
#define ALGORITHM_RAID10_FAR		3

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned int parity_devs;	/* # of parity devices. */
	const unsigned int minimal_devs;/* minimal # of devices in set. */
	const unsigned int level;	/* RAID level. */
	const unsigned int algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",	"raid0 (striping)",			  0, 2, 0,  0 /* NONE */},
	{"raid1",	"raid1 (mirroring)",			  0, 2, 1,  0 /* NONE */},
	{"raid10_far",	"raid10 far (striped mirrors)",		  0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)",	  0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near",	"raid10 near (striped mirrors)",	  0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10",	"raid10 (striped mirrors)",		  0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4",	"raid4 (dedicated first parity disk)",	  1, 2, 5,  ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
	{"raid5_n",	"raid5 (dedicated last parity disk)",	  1, 2, 5,  ALGORITHM_PARITY_N},
	{"raid5_ls",	"raid5 (left symmetric)",		  1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs",	"raid5 (right symmetric)",		  1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la",	"raid5 (left asymmetric)",		  1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra",	"raid5 (right asymmetric)",		  1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr",	"raid6 (zero restart)",			  2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr",	"raid6 (N restart)",			  2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc",	"raid6 (N continue)",			  2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6",	"raid6 (dedicated parity/Q n/6)",	  2, 4, 6,  ALGORITHM_PARITY_N_6},
	{"raid6_ls_6",	"raid6 (left symmetric dedicated Q 6)",	  2, 4, 6,  ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6",	"raid6 (right symmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6",	"raid6 (left asymmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6",	"raid6 (right asymmetric dedicated Q 6)", 2, 4, 6,  ALGORITHM_RIGHT_ASYMMETRIC_6}
};

/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* All table line arguments are defined here */
static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
	{ CTR_FLAG_JOURNAL_DEV, "journal_dev" },
	{ CTR_FLAG_JOURNAL_MODE, "journal_mode" },
};

/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}

/* Define correlation of raid456 journal cache modes and dm-raid target line parameters */
static struct {
	const int mode;
	const char *param;
} _raid456_journal_mode[] = {
	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
	{ R5C_JOURNAL_MODE_WRITE_BACK,    "writeback" }
};

/* Return MD raid4/5/6 journal mode for dm @journal_mode one */
static int dm_raid_journal_mode_to_md(const char *mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (!strcasecmp(mode, _raid456_journal_mode[m].param))
			return _raid456_journal_mode[m].mode;

	return -EINVAL;
}

/* Return dm-raid raid4/5/6 journal mode string for @mode */
static const char *md_journal_mode_to_dm_raid(const int mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (mode == _raid456_journal_mode[m].mode)
			return _raid456_journal_mode[m].param;

	return "unknown";
}

/*
 * Bool helpers to test for various raid levels of a raid set.
 * It's the level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 6 */
static bool rs_is_raid6(struct raid_set *rs)
{
	return rs->md.level == 6;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static bool __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
	return rs->md.recovery_cp < rs->md.dev_sectors;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
	return rs->md.reshape_position != MaxSector;
}

/*
 * bool helpers to test for various raid levels of a raid type @rt
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}
/* END: raid level bools */

/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return 0;
}

/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}

/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET			(1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROCKEN_USE_FAR_SETS	(1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
#define RAID10_USE_FAR_SETS		(1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT		8	  /* raid10 # far copies shift (2nd byte of layout) */

/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static bool __is_raid10_offset(int layout)
{
	return !!(layout & RAID10_OFFSET);
}

/* Return true if md raid10 near for @layout */
static bool __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static bool __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	if (__raid10_far_copies(layout) > 1)
		return "far";

	return "unknown";
}

/* Return md raid10 algorithm for @name */
static int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
	return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}

/* Return md raid10 format id for @format string */
static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	/*
	 * MD resilience flaw:
	 *
	 * enabling use_far_sets for far/offset formats causes copies
	 * to be colocated on the same devs together with their origins!
	 *
	 * -> disable it for now in the definition above
	 */
	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
/* END: MD raid10 bit definitions and helpers */

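/*
 * Worked example of the layout encoding above: "near" with 2 copies
 * encodes as (1 << 8) | 2 = 0x102 (n = 2, f = 1); "far" with 2 copies
 * as RAID10_USE_FAR_SETS | (2 << 8) | 1 = 0x40201; "offset" with 2
 * copies additionally sets RAID10_OFFSET, giving 0x50201 (both far
 * cases assume 'raid10_use_near_sets' was not requested).
 */
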
/* Check for any of the raid10 algorithms */
static bool __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return false;
}

/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

/* Return raid_type derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		/* RAID10 special checks based on @layout flags/properties */
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

/* Adjust rdev sectors */
static void rs_set_rdev_sectors(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	/*
	 * raid10 sets rdev->sectors to the device size, which
	 * is unintended in case of out-of-place reshaping
	 */
	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = mddev->dev_sectors;
}

/*
 * Change bdev capacity of @rs in case of a disk add/remove reshape
 */
static void rs_set_capacity(struct raid_set *rs)
{
	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

	set_capacity_and_notify(gendisk, rs->md.array_sectors);
}

/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */
static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}

static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned int raid_devs)
{
	unsigned int i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = MaxSector;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

/* Free all @rs allocations */
static void raid_set_free(struct raid_set *rs)
{
	int i;

	if (rs->journal_dev.dev) {
		md_rdev_clear(&rs->journal_dev.rdev);
		dm_put_device(rs->ti, rs->journal_dev.dev);
	}

	for (i = 0; i < rs->raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Put off the number of raid devices argument to get to dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets initially.
		 * Out of place reshape will set them accordingly.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.new_data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (rs->journal_dev.dev)
		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not. Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors. If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (rs_is_raid0(rs))
		return 0;

	if (!region_size) {
		/*
		 * Choose a reasonable default. All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = to_bytes(region_size);

	return 0;
}

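/*
 * Worked example: for a 1 TiB target (2^31 sectors of 512 bytes) and
 * region_size == 0, min_region_size = 2^31 / 2^21 = 1024 sectors; that
 * is below 2^13, so the 4 MiB default (8192 sectors) is chosen.
 */
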
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned int i, rebuild_cnt = 0;
	unsigned int rebuilds_per_group = 0, copies, raid_disks;
	unsigned int group_size, last_group_start;

	for (i = 0; i < rs->raid_disks; i++)
		if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
		    ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		      !rs->dev[i].rdev.sb_page)))
			rebuild_cnt++;

	switch (rs->md.level) {
	case 0:
		break;
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.new_layout);
		if (copies < 2) {
			DMERR("Bogus raid10 data copies < 2!");
			return -EINVAL;
		}

		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled. In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g.    dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		raid_disks = min(rs->raid_disks, rs->md.raid_disks);
		if (__is_raid10_near(rs->md.new_layout)) {
			for (i = 0; i < raid_disks; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				if ((!rs->dev[i].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt. (Note that "far" and "offset" formats only
		 * support two copies right now. These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies. This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (raid_disks / copies);
		last_group_start = (raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}

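/*
 * Example for the "near" check above: with 4 devices and 2 copies the
 * mirror groups are (dev0,dev1) and (dev2,dev3). Rebuilding dev0 and
 * dev2 is acceptable (one per group), whereas dev0 and dev1 together
 * trip the ++rebuilds_per_group >= copies test and fail validation.
 */
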
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *    [journal_dev <dev>]		raid4/5/6 journaling device
 *					(i.e. write hole closing log)
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies. (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm. (Default: near)
 */

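/*
 * Illustrative table line (the device numbers are placeholders): a
 * 5-device raid4 set with 1 MiB (2048-sector) chunks and no metadata
 * devices could be constructed as:
 *
 *	0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 */
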
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned int num_raid_params)
{
	int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned int raid10_copies = 2;
	unsigned int i, write_mostly = 0;
	unsigned int region_size = 0;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--; /* Account for chunk_size argument */

	if (kstrtoint(arg, 10, &value) < 0) {
		rs->ti->error = "Bad numerical argument given for chunk_size";
		return -EINVAL;
	}

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'. If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key) {
			rs->ti->error = "Not enough raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'nosync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'sync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
				return -EINVAL;
			}
			continue;
		}

		arg = dm_shift_arg(as);
		i++; /* Account for the argument pairs */
		if (!arg) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		/*
		 * Parameters that take a string value are checked here.
		 */
		/* "raid10_format {near|offset|far}" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
				return -EINVAL;
			}
			if (!rt_is_raid10(rt)) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return raid10_format;
			}
			continue;
		}

		/* "journal_dev <dev>" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
			int r;
			struct md_rdev *jdev;

			if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
				return -EINVAL;
			}
			if (!rt_is_raid456(rt)) {
				rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->journal_dev.dev);
			if (r) {
				rs->ti->error = "raid4/5/6 journal device lookup failure";
				return r;
			}
			jdev = &rs->journal_dev.rdev;
			md_rdev_init(jdev);
			jdev->mddev = &rs->md;
			jdev->bdev = rs->journal_dev.dev->bdev;
			jdev->sectors = bdev_nr_sectors(jdev->bdev);
			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
				rs->ti->error = "No space for raid4/5/6 journal";
				return -ENOSPC;
			}
			rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
			set_bit(Journal, &jdev->flags);
			continue;
		}

		/* "journal_mode <mode>" ("journal_dev" mandatory!) */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
			int r;

			if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
				return -EINVAL;
			}
			if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
				return -EINVAL;
			}
			r = dm_raid_journal_mode_to_md(arg);
			if (r < 0) {
				rs->ti->error = "Invalid 'journal_mode' argument";
				return r;
			}
			rs->journal_dev.mode = r;
			continue;
		}

		/*
		 * Parameters with number values from here on.
		 */
		if (kstrtoint(arg, 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
			/*
			 * "rebuild" is being passed in by userspace to provide
			 * indexes of replaced devices and to set up additional
			 * devices on raid level takeover.
			 */
			if (!__within_range(value, 0, rs->raid_disks - 1)) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}

			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
				rs->ti->error = "rebuild for this index already given";
				return -EINVAL;
			}

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}

			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
				rs->ti->error = "Invalid write_mostly index given";
				return -EINVAL;
			}

			write_mostly++;
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}

			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_write_behind argument pair allowed";
				return -EINVAL;
			}

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			if (value < 0 || value / 2 > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}

			rs->md.bitmap_info.max_write_behind = value / 2;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
				rs->ti->error = "Only one daemon_sleep argument pair allowed";
				return -EINVAL;
			}

			if (!value || value > MAX_SCHEDULE_TIMEOUT) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
			/* Userspace passes new data_offset after having extended the data image LV */
			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
				rs->ti->error = "Only one data_offset argument pair allowed";
				return -EINVAL;
			}

			/* Ensure sensible data offset */
			if (value < 0 ||
			    (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
				rs->ti->error = "Bogus data_offset value";
				return -EINVAL;
			}

			rs->data_offset = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
			/* Define the +/-# of disks to add to/remove from the given raid set */
			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				rs->ti->error = "Only one delta_disks argument pair allowed";
				return -EINVAL;
			}

			/* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
				rs->ti->error = "Too many delta_disks requested";
				return -EINVAL;
			}

			rs->delta_disks = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
				rs->ti->error = "Only one stripe_cache argument pair allowed";
				return -EINVAL;
			}

			if (!rt_is_raid456(rt)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "Bogus stripe cache entries value";
				return -EINVAL;
			}

			rs->stripe_cache_entries = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
				rs->ti->error = "Only one region_size argument pair allowed";
				return -EINVAL;
			}

			region_size = value;
			rs->requested_bitmap_chunk_sectors = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid10_copies argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(value, 2, rs->md.raid_disks)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameter";
			return -EINVAL;
		}
	}

	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
	    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
		rs->ti->error = "sync and nosync are mutually exclusive";
		return -EINVAL;
	}

	if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
	    (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
	     test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
		rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
		return -EINVAL;
	}

	if (write_mostly >= rs->md.raid_disks) {
		rs->ti->error = "Can't set all raid1 devices to write_mostly";
		return -EINVAL;
	}

	if (rs->md.sync_speed_max &&
	    rs->md.sync_speed_min > rs->md.sync_speed_max) {
		rs->ti->error = "Bogus recovery rates";
		return -EINVAL;
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0) {
			rs->ti->error = "Error getting raid10 format";
			return rs->md.new_layout;
		}

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt) {
			rs->ti->error = "Failed to recognize new raid10 layout";
			return -EINVAL;
		}

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
			return -EINVAL;
		}
	}

	rs->raid10_copies = raid10_copies;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_valid_flags(rs);
}

/* Set raid4/5/6 cache size */
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
	int r;
	struct r5conf *conf;
	struct mddev *mddev = &rs->md;
	uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
	uint32_t nr_stripes = rs->stripe_cache_entries;

	if (!rt_is_raid456(rs->raid_type)) {
		rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
		return -EINVAL;
	}

	if (nr_stripes < min_stripes) {
		DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
		       nr_stripes, min_stripes);
		nr_stripes = min_stripes;
	}

	conf = mddev->private;
	if (!conf) {
		rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
		return -EINVAL;
	}

	/* Try setting number of stripes in raid456 stripe cache */
	if (conf->min_nr_stripes != nr_stripes) {
		r = raid5_set_cache_size(mddev, nr_stripes);
		if (r) {
			rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
			return r;
		}

		DMINFO("%u stripe cache entries", nr_stripes);
	}

	return 0;
}

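/*
 * Example: with 128-sector (64 KiB) chunks, min_stripes = 128 / 2 = 64,
 * so a stripe_cache request below 64 entries is raised to 64 above.
 */
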
/* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */
static unsigned int mddev_data_stripes(struct raid_set *rs)
{
	return rs->md.raid_disks - rs->raid_type->parity_devs;
}

/* Return # of data stripes of @rs (i.e. as of ctr) */
static unsigned int rs_data_stripes(struct raid_set *rs)
{
	return rs->raid_disks - rs->raid_type->parity_devs;
}

/*
 * Retrieve rdev->sectors from any valid raid device of @rs
 * to allow userspace to pass in arbitrary "- -" device tuples.
 */
static sector_t __rdev_sectors(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->raid_disks; i++) {
		struct md_rdev *rdev = &rs->dev[i].rdev;

		if (!test_bit(Journal, &rdev->flags) &&
		    rdev->bdev && rdev->sectors)
			return rdev->sectors;
	}

	return 0;
}

/* Check that calculated dev_sectors fits all component devices. */
static int _check_data_dev_sectors(struct raid_set *rs)
{
	sector_t ds = ~0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
			ds = min(ds, bdev_nr_sectors(rdev->bdev));
			if (ds < rs->md.dev_sectors) {
				rs->ti->error = "Component device(s) too small";
				return -EINVAL;
			}
		}

	return 0;
}

/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev)
{
	int delta_disks;
	unsigned int data_stripes;
	sector_t array_sectors = sectors, dev_sectors = sectors;
	struct mddev *mddev = &rs->md;

	if (use_mddev) {
		delta_disks = mddev->delta_disks;
		data_stripes = mddev_data_stripes(rs);
	} else {
		delta_disks = rs->delta_disks;
		data_stripes = rs_data_stripes(rs);
	}

	/* Special raid1 case w/o delta_disks support (yet) */
	if (rt_is_raid1(rs->raid_type))
		;
	else if (rt_is_raid10(rs->raid_type)) {
		if (rs->raid10_copies < 2 ||
		    delta_disks < 0) {
			rs->ti->error = "Bogus raid10 data copies or delta disks";
			return -EINVAL;
		}

		dev_sectors *= rs->raid10_copies;
		if (sector_div(dev_sectors, data_stripes))
			goto bad;

		array_sectors = (data_stripes + delta_disks) * dev_sectors;
		if (sector_div(array_sectors, rs->raid10_copies))
			goto bad;

	} else if (sector_div(dev_sectors, data_stripes))
		goto bad;

	else
		/* Striped layouts */
		array_sectors = (data_stripes + delta_disks) * dev_sectors;

	mddev->array_sectors = array_sectors;
	mddev->dev_sectors = dev_sectors;
	rs_set_rdev_sectors(rs);

	return _check_data_dev_sectors(rs);
bad:
	rs->ti->error = "Target length not divisible by number of data devices";
	return -EINVAL;
}

/* Setup recovery on @rs */
static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	/* raid0 does not recover */
	if (rs_is_raid0(rs))
		rs->md.recovery_cp = MaxSector;
	/*
	 * A raid6 set has to be recovered either
	 * completely or for the grown part to
	 * ensure proper parity and Q-Syndrome
	 */
	else if (rs_is_raid6(rs))
		rs->md.recovery_cp = dev_sectors;
	/*
	 * Other raid set types may skip recovery
	 * depending on the 'nosync' flag.
	 */
	else
		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
				     ? MaxSector : dev_sectors;
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	smp_rmb(); /* Make sure we access the most recent mddev properties */
	if (!rs_is_reshaping(rs)) {
		if (rs_is_raid10(rs))
			rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}
	dm_table_event(rs->ti->table);
}

/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions which are enforced here.
 */
static int rs_check_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	unsigned int near_copies;

	if (rs->md.degraded) {
		rs->ti->error = "Can't takeover degraded raid set";
		return -EPERM;
	}

	if (rs_is_reshaping(rs)) {
		rs->ti->error = "Can't takeover reshaping raid set";
		return -EPERM;
	}

	switch (mddev->level) {
	case 0:
		/* raid0 -> raid1/5 with one disk */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid0 -> raid10 */
		if (mddev->new_level == 10 &&
		    !(rs->raid_disks % mddev->raid_disks))
			return 0;

		/* raid0 with multiple disks -> raid4/5/6 */
		if (__within_range(mddev->new_level, 4, 6) &&
		    mddev->new_layout == ALGORITHM_PARITY_N &&
		    mddev->raid_disks > 1)
			return 0;

		break;

	case 10:
		/* Can't takeover raid10_offset! */
		if (__is_raid10_offset(mddev->layout))
			break;

		near_copies = __raid10_near_copies(mddev->layout);

		/* raid10* -> raid0 */
		if (mddev->new_level == 0) {
			/* Can takeover raid10_near with raid disks divisible by data copies! */
			if (near_copies > 1 &&
			    !(mddev->raid_disks % near_copies)) {
				mddev->raid_disks /= near_copies;
				mddev->delta_disks = mddev->raid_disks;
				return 0;
			}

			/* Can takeover raid10_far */
			if (near_copies == 1 &&
			    __raid10_far_copies(mddev->layout) > 1)
				return 0;

			break;
		}

		/* raid10_{near,far} -> raid1 */
		if (mddev->new_level == 1 &&
		    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
			return 0;

		/* raid10_{near,far} with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2)
			return 0;
		break;

	case 1:
		/* raid1 with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2) {
			mddev->degraded = 1;
			return 0;
		}

		/* raid1 -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid1 -> raid10 */
		if (mddev->new_level == 10)
			return 0;
		break;

	case 4:
		/* raid4 -> raid0 */
		if (mddev->new_level == 0)
			return 0;

		/* raid4 -> raid1/5 with 2 disks */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid4 -> raid5/6 with parity N */
		if (__within_range(mddev->new_level, 5, 6) &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;
		break;

	case 5:
		/* raid5 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with 2 disks -> raid1/4/10 */
		if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
		if (mddev->new_level == 6 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
			return 0;
		break;

	case 6:
		/* raid6 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6_*_n with Q-Syndrome N -> raid5_* */
		if (mddev->new_level == 5 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
			return 0;
		break;

	default:
		break;
	}

	rs->ti->error = "takeover not possible";
	return -EPERM;
}

/* True if @rs requested to be taken over */
static bool rs_takeover_requested(struct raid_set *rs)
{
	return rs->md.new_level != rs->md.level;
}

/* True if layout is set to reshape. */
static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
{
	return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
	       rs->md.new_layout != rs->md.layout ||
	       rs->md.new_chunk_sectors != rs->md.chunk_sectors;
}

/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
	bool change;
	struct mddev *mddev = &rs->md;

	if (rs_takeover_requested(rs))
		return false;

	if (rs_is_raid0(rs))
		return false;

	change = rs_is_layout_change(rs, false);

	/* Historical case to support raid1 reshape without delta disks */
	if (rs_is_raid1(rs)) {
		if (rs->delta_disks)
			return !!rs->delta_disks;

		return !change &&
		       mddev->raid_disks != rs->raid_disks;
	}

	if (rs_is_raid10(rs))
		return change &&
		       !__is_raid10_far(mddev->new_layout) &&
		       rs->delta_disks >= 0;

	return change;
}

/* Features */
#define	FEATURE_FLAG_SUPPORTS_V190	0x1 /* Supports extended superblock */

/* State flags for sb->flags */
#define	SB_FLAG_RESHAPE_ACTIVE		0x1
#define	SB_FLAG_RESHAPE_BACKWARDS	0x2

/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 compat_features;	/* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */

	__le32 num_devices;	/* Number of devices in this raid set. (Max 64) */
	__le32 array_position;	/* The position of this drive in the raid set */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Pre 1.9.0 part of bit field of devices to */
				/* indicate failures (see extension below) */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial raid set
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * raid characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/********************************************************************
	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
	 *
	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
	 */
	__le32 flags;		/* Flags defining array states for reshaping */

	/*
	 * This offset tracks the progress of a raid
	 * set reshape in order to be able to restart it
	 */
	__le64 reshape_position;

	/*
	 * These define the properties of the array in case of an interrupted reshape
	 */
	__le32 new_level;
	__le32 new_layout;
	__le32 new_stripe_sectors;
	__le32 delta_disks;

	__le64 array_sectors;	/* Array size in sectors */

	/*
	 * Sector offsets to data on devices (reshaping).
	 * Needed to support out of place reshaping, thus
	 * not writing over any stripes whilst converting
	 * them from old to new layout
	 */
	__le64 data_offset;
	__le64 new_data_offset;

	__le64 sectors;		/* Used device size in sectors */

	/*
	 * Additional Bit field of devices indicating failures to support
	 * up to 256 devices with the 1.9.0 on-disk metadata format
	 */
	__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];

	__le32 incompat_features;	/* Used to indicate any incompatible features */

	/* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */
} __packed;

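/*
 * Capacity note: failed_devices carries 64 device bits and
 * extended_failed_devices adds (DISKS_ARRAY_ELEMS - 1) * 64 = 192 more,
 * giving the 256 device bits the v1.9.0 format advertises.
 */
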
/*
 * Check for reshape constraints on raid set @rs:
 *
 * - reshape function non-existent
 * - degraded set
 * - ongoing recovery
 * - ongoing reshape
 *
 * Returns 0 if no constraint applies or -EPERM otherwise,
 * with rs->ti->error pointing to a message describing the constraint.
 */
static int rs_check_reshape(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->check_reshape)
		rs->ti->error = "Reshape not supported";
	else if (mddev->degraded)
		rs->ti->error = "Can't reshape degraded raid set";
	else if (rs_is_recovering(rs))
		rs->ti->error = "Convert request on recovering raid set prohibited";
	else if (rs_is_reshaping(rs))
		rs->ti->error = "raid set already reshaping!";
	else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
		rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
	else
		return 0;

	return -EPERM;
}

static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded && !force_reload)
		return 0;

	rdev->sb_loaded = 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		set_bit(Faulty, &rdev->flags);
		return -EIO;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	failed_devices[0] = le64_to_cpu(sb->failed_devices);
	memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));

	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		int i = ARRAY_SIZE(sb->extended_failed_devices);

		while (i--)
			failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
	}
}

static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	int i = ARRAY_SIZE(sb->extended_failed_devices);

	sb->failed_devices = cpu_to_le64(failed_devices[0]);
	while (i--)
		sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
}

2078 * Synchronize the superblock members with the raid set properties
2080 * All superblock data is little endian.
2082 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
2084 bool update_failed_devices = false;
2086 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2087 struct dm_raid_superblock *sb;
2088 struct raid_set *rs = container_of(mddev, struct raid_set, md);
2090 /* No metadata device, no superblock */
2091 if (!rdev->meta_bdev)
2094 BUG_ON(!rdev->sb_page);
2096 sb = page_address(rdev->sb_page);
2098 sb_retrieve_failed_devices(sb, failed_devices);
2100 for (i = 0; i < rs->raid_disks; i++)
2101 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
2102 update_failed_devices = true;
2103 set_bit(i, (void *) failed_devices);
2106 if (update_failed_devices)
2107 sb_update_failed_devices(sb, failed_devices);
2109 sb->magic = cpu_to_le32(DM_RAID_MAGIC);
2110 sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
2112 sb->num_devices = cpu_to_le32(mddev->raid_disks);
2113 sb->array_position = cpu_to_le32(rdev->raid_disk);
2115 sb->events = cpu_to_le64(mddev->events);
2117 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
2118 sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
2120 sb->level = cpu_to_le32(mddev->level);
2121 sb->layout = cpu_to_le32(mddev->layout);
2122 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
2124 /********************************************************************
2125 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
2127 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
2129 sb->new_level = cpu_to_le32(mddev->new_level);
2130 sb->new_layout = cpu_to_le32(mddev->new_layout);
2131 sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
2133 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2135 smp_rmb(); /* Make sure we access most recent reshape position */
2136 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2137 if (le64_to_cpu(sb->reshape_position) != MaxSector) {
2138 /* Flag ongoing reshape */
2139 sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);
2141 if (mddev->delta_disks < 0 || mddev->reshape_backwards)
2142 sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
2144 /* Clear reshape flags */
2145 sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
2148 sb->array_sectors = cpu_to_le64(mddev->array_sectors);
2149 sb->data_offset = cpu_to_le64(rdev->data_offset);
2150 sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
2151 sb->sectors = cpu_to_le64(rdev->sectors);
2152 sb->incompat_features = cpu_to_le32(0);
2154 /* Zero out the rest of the payload beyond the superblock struct, up to rdev->sb_size */
2155 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
2161 * This function creates a superblock if one is not found on the device
2162 * and will decide which superblock to use if there's a choice.
2164 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
2166 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
2169 struct dm_raid_superblock *sb;
2170 struct dm_raid_superblock *refsb;
2171 uint64_t events_sb, events_refsb;
2173 r = read_disk_sb(rdev, rdev->sb_size, false);
2177 sb = page_address(rdev->sb_page);
2180 * Two cases in which we want to write new superblocks and rebuild:
2181 * 1) New device (no matching magic number)
2182 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
2184 if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
2185 (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
2186 super_sync(rdev->mddev, rdev);
2188 set_bit(FirstUse, &rdev->flags);
2189 sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
2191 /* Force writing of superblocks to disk */
2192 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
2194 /* Any superblock is better than none, choose that if given */
2195 return refdev ? 0 : 1;
2201 events_sb = le64_to_cpu(sb->events);
2203 refsb = page_address(refdev->sb_page);
2204 events_refsb = le64_to_cpu(refsb->events);
2206 return (events_sb > events_refsb) ? 1 : 0;
2209 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2213 struct mddev *mddev = &rs->md;
2215 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2216 struct dm_raid_superblock *sb;
2217 uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
2219 struct dm_raid_superblock *sb2;
2221 sb = page_address(rdev->sb_page);
2222 events_sb = le64_to_cpu(sb->events);
2225 * Initialise to 1 if this is a new superblock.
2227 mddev->events = events_sb ? : 1;
2229 mddev->reshape_position = MaxSector;
2231 mddev->raid_disks = le32_to_cpu(sb->num_devices);
2232 mddev->level = le32_to_cpu(sb->level);
2233 mddev->layout = le32_to_cpu(sb->layout);
2234 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
2237 * Reshaping is supported, i.e. reshape_position is valid
2238 * in superblock and superblock content is authoritative.
2240 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2241 /* Superblock is authoritative wrt given raid set layout! */
2242 mddev->new_level = le32_to_cpu(sb->new_level);
2243 mddev->new_layout = le32_to_cpu(sb->new_layout);
2244 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
2245 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2246 mddev->array_sectors = le64_to_cpu(sb->array_sectors);
2248 /* raid was reshaping and got interrupted */
2249 if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
2250 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
2251 DMERR("Reshape requested but raid set is still reshaping");
2255 if (mddev->delta_disks < 0 ||
2256 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
2257 mddev->reshape_backwards = 1;
2259 mddev->reshape_backwards = 0;
2261 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2262 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
2267 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
2269 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
2270 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
2272 if (rs_takeover_requested(rs)) {
2273 if (rt_cur && rt_new)
2274 DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
2275 rt_cur->name, rt_new->name);
2277 DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
2279 } else if (rs_reshape_requested(rs)) {
2280 DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
2281 if (mddev->layout != mddev->new_layout) {
2282 if (rt_cur && rt_new)
2283 DMERR(" current layout %s vs new layout %s",
2284 rt_cur->name, rt_new->name);
2286 DMERR(" current layout 0x%X vs new layout 0x%X",
2287 le32_to_cpu(sb->layout), mddev->new_layout);
2289 if (mddev->chunk_sectors != mddev->new_chunk_sectors)
2290 DMERR(" current stripe sectors %u vs new stripe sectors %u",
2291 mddev->chunk_sectors, mddev->new_chunk_sectors);
2292 if (rs->delta_disks)
2293 DMERR(" current %u disks vs new %u disks",
2294 mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
2295 if (rs_is_raid10(rs)) {
2296 DMERR(" Old layout: %s w/ %u copies",
2297 raid10_md_layout_to_format(mddev->layout),
2298 raid10_md_layout_to_copies(mddev->layout));
2299 DMERR(" New layout: %s w/ %u copies",
2300 raid10_md_layout_to_format(mddev->new_layout),
2301 raid10_md_layout_to_copies(mddev->new_layout));
2306 DMINFO("Discovered old metadata format; upgrading to extended metadata format");
2309 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
2310 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
2313 * During load, we set FirstUse if a new superblock was written.
2314 * There are three reasons we might not have a superblock:
2315 * 1) The raid set is brand new - in which case, all of the
2316 * devices must have their In_sync bit set. Also,
2317 * recovery_cp must be 0, unless forced.
2318 * 2) This is a new device being added to an old raid set
2319 * and the new device needs to be rebuilt - in which
2320 * case the In_sync bit will /not/ be set and
2321 * recovery_cp must be MaxSector.
2322 * 3) One or more new devices are being added to an old
2323 * raid set during takeover to a higher raid level
2324 * to provide capacity for redundancy or during reshape
2325 * to add capacity to grow the raid set.
2328 rdev_for_each(r, mddev) {
2329 if (test_bit(Journal, &r->flags))
2332 if (test_bit(FirstUse, &r->flags))
2335 if (!test_bit(In_sync, &r->flags)) {
2336 DMINFO("Device %d specified for rebuild; clearing superblock",
2340 if (test_bit(FirstUse, &r->flags))
2347 if (new_devs == rs->raid_disks || !rebuilds) {
2348 /* Replace a broken device */
2349 if (new_devs == rs->raid_disks) {
2350 DMINFO("Superblocks created for new raid set");
2351 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2352 } else if (new_devs != rebuilds &&
2353 new_devs != rs->delta_disks) {
2354 DMERR("New device injected into existing raid set without "
2355 "'delta_disks' or 'rebuild' parameter specified");
2358 } else if (new_devs && new_devs != rebuilds) {
2359 DMERR("%u 'rebuild' devices cannot be injected into"
2360 " a raid set with %u other first-time devices",
2361 rebuilds, new_devs);
2363 } else if (rebuilds) {
2364 if (rebuild_and_new && rebuilds != rebuild_and_new) {
2365 DMERR("new device%s provided without 'rebuild'",
2366 new_devs > 1 ? "s" : "");
2368 } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
2369 DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
2370 (unsigned long long) mddev->recovery_cp);
2372 } else if (rs_is_reshaping(rs)) {
2373 DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
2374 (unsigned long long) mddev->reshape_position);
2380 * Now we set the Faulty bit for those devices that are
2381 * recorded in the superblock as failed.
2383 sb_retrieve_failed_devices(sb, failed_devices);
2384 rdev_for_each(r, mddev) {
2385 if (test_bit(Journal, &r->flags) ||
2388 sb2 = page_address(r->sb_page);
2389 sb2->failed_devices = 0;
2390 memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
2393 * Check for any device re-ordering.
2395 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
2396 role = le32_to_cpu(sb2->array_position);
2400 if (role != r->raid_disk) {
2401 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
2402 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
2403 rs->raid_disks % rs->raid10_copies) {
2405 "Cannot change raid10 near set to odd # of devices!";
2409 sb2->array_position = cpu_to_le32(r->raid_disk);
2411 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
2412 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
2413 !rt_is_raid1(rs->raid_type)) {
2414 rs->ti->error = "Cannot change device positions in raid set";
2418 DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
2422 * Partial recovery is performed on
2423 * failed devices returning to the raid set.
2425 if (test_bit(role, (void *) failed_devices))
2426 set_bit(Faulty, &r->flags);
2433 static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2435 struct mddev *mddev = &rs->md;
2436 struct dm_raid_superblock *sb;
2438 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
2441 sb = page_address(rdev->sb_page);
2444 * If mddev->events is not set, we know we have not yet initialized the device.
2447 if (!mddev->events && super_init_validation(rs, rdev))
2450 if (le32_to_cpu(sb->compat_features) &&
2451 le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2452 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
2456 if (sb->incompat_features) {
2457 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
2461 /* Enable bitmap creation on @rs unless it has no metadata devices, is raid0, or is a journaled raid4/5/6 set. */
2462 mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
2463 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
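/* I.e. with 512-byte sectors the bitmap area starts 8 sectors (4 KiB) into the metadata device. */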
2465 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
2467 * Retrieve rdev size stored in superblock to be prepared for shrink.
2468 * Check extended superblock members are present, otherwise the size
2469 * will not be set!
2471 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
2472 rdev->sectors = le64_to_cpu(sb->sectors);
2474 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
2475 if (rdev->recovery_offset == MaxSector)
2476 set_bit(In_sync, &rdev->flags);
2478 * If no reshape in progress -> we're recovering single
2479 * disk(s) and have to set the device(s) to out-of-sync
2481 else if (!rs_is_reshaping(rs))
2482 clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
2486 * If a device comes back, set it as not In_sync and no longer faulty.
2488 if (test_and_clear_bit(Faulty, &rdev->flags)) {
2489 rdev->recovery_offset = 0;
2490 clear_bit(In_sync, &rdev->flags);
2491 rdev->saved_raid_disk = rdev->raid_disk;
2494 /* Reshape support -> restore respective data offsets */
2495 rdev->data_offset = le64_to_cpu(sb->data_offset);
2496 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
2502 * Analyse superblocks and select the freshest.
2504 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2507 struct md_rdev *rdev, *freshest;
2508 struct mddev *mddev = &rs->md;
2511 rdev_for_each(rdev, mddev) {
2512 if (test_bit(Journal, &rdev->flags))
2515 if (!rdev->meta_bdev)
2518 /* Set superblock offset/size for metadata device. */
2520 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
2521 if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
2522 DMERR("superblock size of a logical block is no longer valid");
2527 * Skipping super_load due to CTR_FLAG_SYNC will cause
2528 * the array to undergo initialization again as
2529 * though it were new. This is the intended effect
2530 * of the "sync" directive.
2532 * With reshaping capability added, we must ensure that
2533 * the "sync" directive is disallowed during the reshape.
2535 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
2538 r = super_load(rdev, freshest);
2547 /* This is a failure to read the superblock from the metadata device. */
2549 * We have to keep any raid0 data/metadata device pairs or
2550 * the MD raid0 personality will fail to start the array.
2552 if (rs_is_raid0(rs))
2556 * We keep the dm_devs to be able to emit the device tuple
2557 * properly on the table line in raid_status() (rather than
2558 * mistakenly acting as if '- -' got passed into the constructor).
2560 * The rdev has to stay on the same_set list to allow for
2561 * the attempt to restore faulty devices on second resume.
2563 rdev->raid_disk = rdev->saved_raid_disk = -1;
2572 * Validation of the freshest device provides the source of
2573 * validation for the remaining devices.
2575 rs->ti->error = "Unable to assemble array: Invalid superblocks";
2576 if (super_validate(rs, freshest))
2579 if (validate_raid_redundancy(rs)) {
2580 rs->ti->error = "Insufficient redundancy to activate array";
2584 rdev_for_each(rdev, mddev)
2585 if (!test_bit(Journal, &rdev->flags) &&
2587 super_validate(rs, rdev))
2593 * Adjust data_offset and new_data_offset on all disk members of @rs
2594 * for out-of-place reshaping if requested by the constructor.
2596 * We need free space at the beginning of each raid disk for forward
2597 * reshapes, and at the end for backward reshapes, which userspace
2598 * has to provide via remapping/reordering of space.
2600 static int rs_adjust_data_offsets(struct raid_set *rs)
2602 sector_t data_offset = 0, new_data_offset = 0;
2603 struct md_rdev *rdev;
2605 /* Constructor did not request data offset change */
2606 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
2607 if (!rs_is_reshapable(rs))
2613 /* HM FIXME: get In_Sync raid_dev? */
2614 rdev = &rs->dev[0].rdev;
2616 if (rs->delta_disks < 0) {
2618 * Removing disks (reshaping backwards):
2620 * - before reshape: data is at offset 0 and free space
2621 * is at the end of each component LV
2623 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
2626 new_data_offset = rs->data_offset;
2628 } else if (rs->delta_disks > 0) {
2630 * Adding disks (reshaping forwards):
2632 * - before reshape: data is at offset rs->data_offset != 0 and
2633 * free space is at the beginning of each component LV
2635 * - after reshape: data is at offset 0 on each component LV
2637 data_offset = rs->data_offset;
2638 new_data_offset = 0;
2642 * User space passes in 0 for data offset after having removed reshape space
2644 * - or - (data offset != 0)
2646 * Changing RAID layout or chunk size -> toggle offsets
2648 * - before reshape: data is at offset rs->data_offset == 0 and
2649 * free space is at the end of each component LV
2651 * data is at offset rs->data_offset != 0 and
2652 * free space is at the beginning of each component LV
2654 * - after reshape: data is at offset 0 if it was at offset != 0
2655 * or at offset != 0 if it was at offset 0
2656 * on each component LV
2659 data_offset = rs->data_offset ? rdev->data_offset : 0;
2660 new_data_offset = data_offset ? 0 : rs->data_offset;
2661 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2665 * Make sure we have a minimum amount of free sectors per device
2667 if (rs->data_offset &&
2668 bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
2669 rs->ti->error = data_offset ? "No space for forward reshape" :
2670 "No space for backward reshape";
2675 * Raise recovery_cp in case data_offset != 0 to
2676 * avoid false recovery positives in the constructor.
2678 if (rs->md.recovery_cp < rs->md.dev_sectors)
2679 rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
2681 /* Adjust data offsets on all rdevs but not on any raid4/5/6 journal device */
2682 rdev_for_each(rdev, &rs->md) {
2683 if (!test_bit(Journal, &rdev->flags)) {
2684 rdev->data_offset = data_offset;
2685 rdev->new_data_offset = new_data_offset;
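/*
 * Worked example with illustrative numbers: growing the set with
 * "data_offset 8192" passed in by the constructor takes the
 * delta_disks > 0 branch above, yielding data_offset == 8192 and
 * new_data_offset == 0 on each component, i.e. a forward reshape
 * into the free space userspace left at the beginning of each image.
 */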
2692 /* Userspace reordered disks -> adjust raid_disk indexes in @rs */
2693 static void __reorder_raid_disk_indexes(struct raid_set *rs)
2696 struct md_rdev *rdev;
2698 rdev_for_each(rdev, &rs->md) {
2699 if (!test_bit(Journal, &rdev->flags)) {
2700 rdev->raid_disk = i++;
2701 rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2707 * Setup @rs for takeover by a different raid level
2709 static int rs_setup_takeover(struct raid_set *rs)
2711 struct mddev *mddev = &rs->md;
2712 struct md_rdev *rdev;
2713 unsigned int d = mddev->raid_disks = rs->raid_disks;
2714 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
2716 if (rt_is_raid10(rs->raid_type)) {
2717 if (rs_is_raid0(rs)) {
2718 /* Userspace reordered disks -> adjust raid_disk indexes */
2719 __reorder_raid_disk_indexes(rs);
2721 /* raid0 -> raid10_far layout */
2722 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
2724 } else if (rs_is_raid1(rs))
2725 /* raid1 -> raid10_near layout */
2726 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2733 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2734 mddev->recovery_cp = MaxSector;
2737 rdev = &rs->dev[d].rdev;
2739 if (test_bit(d, (void *) rs->rebuild_disks)) {
2740 clear_bit(In_sync, &rdev->flags);
2741 clear_bit(Faulty, &rdev->flags);
2742 mddev->recovery_cp = rdev->recovery_offset = 0;
2743 /* Bitmap has to be created when we do an "up" takeover */
2744 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2747 rdev->new_data_offset = new_data_offset;
2753 /* Prepare @rs for reshape */
2754 static int rs_prepare_reshape(struct raid_set *rs)
2757 struct mddev *mddev = &rs->md;
2759 if (rs_is_raid10(rs)) {
2760 if (rs->raid_disks != mddev->raid_disks &&
2761 __is_raid10_near(mddev->layout) &&
2762 rs->raid10_copies &&
2763 rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
2765 * raid disks have to be a multiple of data copies to allow this conversion.
2767 * This is actually not a reshape; it is a
2768 * rebuild of any additional mirrors per group
2770 if (rs->raid_disks % rs->raid10_copies) {
2771 rs->ti->error = "Can't reshape raid10 mirror groups";
2775 /* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
2776 __reorder_raid_disk_indexes(rs);
2777 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2779 mddev->new_layout = mddev->layout;
2784 } else if (rs_is_raid456(rs))
2787 else if (rs_is_raid1(rs)) {
2788 if (rs->delta_disks) {
2789 /* Process raid1 via delta_disks */
2790 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
2793 /* Process raid1 without delta_disks */
2794 mddev->raid_disks = rs->raid_disks;
2798 rs->ti->error = "Called with bogus raid type";
2803 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2804 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2805 } else if (mddev->raid_disks < rs->raid_disks)
2806 /* Create new superblocks and bitmaps, if any new disks */
2807 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2812 /* Get reshape sectors from data_offsets or raid set */
2813 static sector_t _get_reshape_sectors(struct raid_set *rs)
2815 struct md_rdev *rdev;
2816 sector_t reshape_sectors = 0;
2818 rdev_for_each(rdev, &rs->md)
2819 if (!test_bit(Journal, &rdev->flags)) {
2820 reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
2821 rdev->data_offset - rdev->new_data_offset :
2822 rdev->new_data_offset - rdev->data_offset;
2826 return max(reshape_sectors, (sector_t) rs->data_offset);
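/*
 * Example with illustrative values: for rdev->data_offset == 8192 and
 * rdev->new_data_offset == 0, the loop above yields 8192 sectors (4 MiB)
 * of out-of-place reshape space, unless rs->data_offset is even larger.
 */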
2831 * - change raid layout
2832 * - change chunk size
2836 static int rs_setup_reshape(struct raid_set *rs)
2839 unsigned int cur_raid_devs, d;
2840 sector_t reshape_sectors = _get_reshape_sectors(rs);
2841 struct mddev *mddev = &rs->md;
2842 struct md_rdev *rdev;
2844 mddev->delta_disks = rs->delta_disks;
2845 cur_raid_devs = mddev->raid_disks;
2847 /* Ignore impossible layout change whilst adding/removing disks */
2848 if (mddev->delta_disks &&
2849 mddev->layout != mddev->new_layout) {
2850 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
2851 mddev->new_layout = mddev->layout;
2855 * Adjust array size:
2857 * - in case of adding disk(s), array size has
2858 * to grow after the disk adding reshape,
2859 * which'll happen in the event handler;
2860 * reshape will happen forward, so space has to
2861 * be available at the beginning of each disk
2863 * - in case of removing disk(s), array size
2864 * has to shrink before starting the reshape,
2865 * which'll happen here;
2866 * reshape will happen backward, so space has to
2867 * be available at the end of each disk
2869 * - data_offset and new_data_offset are
2870 * adjusted for aforementioned out of place
2871 * reshaping based on userspace passing in
2872 * the "data_offset <sectors>" key/value
2873 * pair via the constructor
2877 if (rs->delta_disks > 0) {
2878 /* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
2879 for (d = cur_raid_devs; d < rs->raid_disks; d++) {
2880 rdev = &rs->dev[d].rdev;
2881 clear_bit(In_sync, &rdev->flags);
2884 * saved_raid_disk needs to be -1, or recovery_offset will be set to 0
2885 * by md, which'll store that erroneously in the superblock on reshape
2887 rdev->saved_raid_disk = -1;
2888 rdev->raid_disk = d;
2890 rdev->sectors = mddev->dev_sectors;
2891 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
2894 mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
2896 /* Remove disk(s) */
2897 } else if (rs->delta_disks < 0) {
2898 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true);
2899 mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
2901 /* Change layout and/or chunk size */
2904 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
2906 * keeping number of disks and do layout change ->
2908 * toggle reshape_backward depending on data_offset:
2910 * - free space upfront -> reshape forward
2912 * - free space at the end -> reshape backward
2915 * This utilizes free reshape space, avoiding the need
2916 * for userspace to move (parts of) LV segments in
2917 * case of a layout/chunksize change. For disk
2918 * adding/removing, reshape space has to be at
2919 * the proper address (see above with delta_disks):
2921 * add disk(s) -> begin
2922 * remove disk(s)-> end
2924 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2928 * Adjust device size for forward reshape
2929 * because md_finish_reshape() reduces it.
2931 if (!mddev->reshape_backwards)
2932 rdev_for_each(rdev, &rs->md)
2933 if (!test_bit(Journal, &rdev->flags))
2934 rdev->sectors += reshape_sectors;
2940 * If the md resync thread has updated the superblock with the max reshape
2941 * position at the end of a reshape, but has not (yet) reset the layout
2942 * configuration changes -> reset the latter.
2944 static void rs_reset_inconclusive_reshape(struct raid_set *rs)
2946 if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
2948 rs->md.delta_disks = 0;
2949 rs->md.reshape_backwards = 0;
2954 * Enable/disable discard support on RAID set depending on
2955 * RAID level and discard properties of underlying RAID members.
2957 static void configure_discard_support(struct raid_set *rs)
2961 struct dm_target *ti = rs->ti;
2964 * XXX: RAID level 4,5,6 require zeroing for safety.
2966 raid456 = rs_is_raid456(rs);
2968 for (i = 0; i < rs->raid_disks; i++) {
2969 if (!rs->dev[i].rdev.bdev ||
2970 !bdev_max_discard_sectors(rs->dev[i].rdev.bdev))
2974 if (!devices_handle_discard_safely) {
2975 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
2976 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
2982 ti->num_discard_bios = 1;
2986 * Construct a RAID0/1/10/4/5/6 mapping:
2988 * <raid_type> <#raid_params> <raid_params>{0,} \
2989 * <#raid_devs> [<meta_dev1> <dev1>]{1,}
2991 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
2992 * details on possible <raid_params>.
2994 * Userspace is free to initialize the metadata devices (hence the superblocks)
2995 * to enforce recreation based on the passed-in table parameters.
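/*
 * Illustrative example only (sizes and device numbers made up): a
 * 3-device raid5_ls set with 64 KiB chunks, an 8192-sector region size
 * and explicit metadata devices could be constructed with a table line
 * like:
 *
 *	0 2097152 raid raid5_ls 3 128 region_size 8192 \
 *		3 254:0 254:1 254:2 254:3 254:4 254:5
 */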
2998 static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3001 bool resize = false;
3002 struct raid_type *rt;
3003 unsigned int num_raid_params, num_raid_devs;
3004 sector_t sb_array_sectors, rdev_sectors, reshape_sectors;
3005 struct raid_set *rs = NULL;
3007 struct rs_layout rs_layout;
3008 struct dm_arg_set as = { argc, argv }, as_nrd;
3009 struct dm_arg _args[] = {
3010 { 0, as.argc, "Cannot understand number of raid parameters" },
3011 { 1, 254, "Cannot understand number of raid devices parameters" }
3014 arg = dm_shift_arg(&as);
3016 ti->error = "No arguments";
3020 rt = get_raid_type(arg);
3022 ti->error = "Unrecognised raid_type";
3026 /* Must have <#raid_params> */
3027 if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
3030 /* number of raid device tuples <meta_dev data_dev> */
3032 dm_consume_args(&as_nrd, num_raid_params);
3033 _args[1].max = (as_nrd.argc - 1) / 2;
3034 if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
3037 if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
3038 ti->error = "Invalid number of supplied raid devices";
3042 rs = raid_set_alloc(ti, rt, num_raid_devs);
3046 r = parse_raid_params(rs, &as, num_raid_params);
3050 r = parse_dev_params(rs, &as);
3054 rs->md.sync_super = super_sync;
3057 * Calculate ctr requested array and device sizes to allow
3058 * for superblock analysis needing device sizes defined.
3060 * Any existing superblock will overwrite the array and device sizes
3062 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
3066 /* Memorize just calculated, potentially larger sizes to grow the raid set in preresume */
3067 rs->array_sectors = rs->md.array_sectors;
3068 rs->dev_sectors = rs->md.dev_sectors;
3071 * Backup any new raid set level, layout, ...
3072 * requested to be able to compare to superblock
3073 * members for conversion decisions.
3075 rs_config_backup(rs, &rs_layout);
3077 r = analyse_superblocks(ti, rs);
3081 /* All in-core metadata now as of current superblocks after calling analyse_superblocks() */
3082 sb_array_sectors = rs->md.array_sectors;
3083 rdev_sectors = __rdev_sectors(rs);
3084 if (!rdev_sectors) {
3085 ti->error = "Invalid rdev size";
3091 reshape_sectors = _get_reshape_sectors(rs);
3092 if (rs->dev_sectors != rdev_sectors) {
3093 resize = (rs->dev_sectors != rdev_sectors - reshape_sectors);
3094 if (rs->dev_sectors > rdev_sectors - reshape_sectors)
3095 set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
3098 INIT_WORK(&rs->md.event_work, do_table_event);
3100 ti->num_flush_bios = 1;
3101 ti->needs_bio_set_dev = true;
3103 /* Restore any requested new layout for conversion decision */
3104 rs_config_restore(rs, &rs_layout);
3107 * Now that we have any superblock metadata available,
3108 * check for new, recovering, reshaping, to be taken over,
3109 * to be reshaped or an existing, unchanged raid set to run.
3112 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
3113 /* A new raid6 set has to be recovered to ensure proper parity and Q-Syndrome */
3114 if (rs_is_raid6(rs) &&
3115 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
3116 ti->error = "'nosync' not allowed for new raid6 set";
3120 rs_setup_recovery(rs, 0);
3121 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3123 } else if (rs_is_recovering(rs)) {
3124 /* A recovering raid set may be resized */
3126 } else if (rs_is_reshaping(rs)) {
3127 /* Have to reject size change request during reshape */
3129 ti->error = "Can't resize a reshaping raid set";
3134 } else if (rs_takeover_requested(rs)) {
3135 if (rs_is_reshaping(rs)) {
3136 ti->error = "Can't takeover a reshaping raid set";
3141 /* We can't takeover a journaled raid4/5/6 */
3142 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3143 ti->error = "Can't takeover a journaled raid4/5/6 set";
3149 * If a takeover is needed, userspace sets any additional
3150 * devices to rebuild and we can check for a valid request here.
3152 * If acceptable, set the level to the new requested
3153 * one, prohibit requesting recovery, allow the raid
3154 * set to run and store superblocks during resume.
3156 r = rs_check_takeover(rs);
3160 r = rs_setup_takeover(rs);
3164 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3165 /* Takeover ain't recovery, so disable recovery */
3166 rs_setup_recovery(rs, MaxSector);
3168 } else if (rs_reshape_requested(rs)) {
3169 /* Only request grow on raid set size extensions, not on reshapes. */
3170 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
3173 * No need to check for 'ongoing' takeover here, because takeover
3174 * is an instant operation as opposed to an ongoing reshape.
3177 /* We can't reshape a journaled raid4/5/6 */
3178 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3179 ti->error = "Can't reshape a journaled raid4/5/6 set";
3184 /* Out-of-place space has to be available to allow for a reshape unless raid1! */
3185 if (reshape_sectors || rs_is_raid1(rs)) {
3187 * We can only prepare for a reshape here, because the
3188 * raid set needs to run to provide the respective reshape
3189 * check functions via its MD personality instance.
3191 * So do the reshape check after md_run() succeeded.
3193 r = rs_prepare_reshape(rs);
3197 /* Reshaping ain't recovery, so disable recovery */
3198 rs_setup_recovery(rs, MaxSector);
3203 /* May not set recovery when a device rebuild is requested */
3204 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3205 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
3206 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3207 rs_setup_recovery(rs, MaxSector);
3208 } else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
3210 * Set raid set to current size, i.e. size as of
3211 * superblocks to grow to larger size in preresume.
3213 r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false);
3217 rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
3219 /* There is no size change or the set is shrinking; update the size and record it in the superblocks */
3220 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
3224 if (sb_array_sectors > rs->array_sectors)
3225 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3230 /* If constructor requested it, change data and new_data offsets */
3231 r = rs_adjust_data_offsets(rs);
3235 /* Catch any inconclusive reshape superblock content. */
3236 rs_reset_inconclusive_reshape(rs);
3238 /* Start raid set read-only and assumed clean to change in raid_resume() */
3242 /* Keep array frozen until resume. */
3243 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
3245 /* The mddev lock has to be held while running the array */
3246 mddev_lock_nointr(&rs->md);
3247 r = md_run(&rs->md);
3248 rs->md.in_sync = 0; /* Assume already marked dirty */
3250 ti->error = "Failed to run raid array";
3251 mddev_unlock(&rs->md);
3255 r = md_start(&rs->md);
3257 ti->error = "Failed to start raid array";
3258 mddev_unlock(&rs->md);
3262 /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
3263 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
3264 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
3266 ti->error = "Failed to set raid4/5/6 journal mode";
3267 mddev_unlock(&rs->md);
3268 goto bad_journal_mode_set;
3272 mddev_suspend(&rs->md);
3273 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3275 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
3276 if (rs_is_raid456(rs)) {
3277 r = rs_set_raid456_stripe_cache(rs);
3279 goto bad_stripe_cache;
3282 /* Now do an early reshape check */
3283 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
3284 r = rs_check_reshape(rs);
3286 goto bad_check_reshape;
3288 /* Restore new, ctr requested layout to perform check */
3289 rs_config_restore(rs, &rs_layout);
3291 if (rs->md.pers->start_reshape) {
3292 r = rs->md.pers->check_reshape(&rs->md);
3294 ti->error = "Reshape check failed";
3295 goto bad_check_reshape;
3300 /* Disable/enable discard support on raid set. */
3301 configure_discard_support(rs);
3303 mddev_unlock(&rs->md);
3307 bad_journal_mode_set:
3317 static void raid_dtr(struct dm_target *ti)
3319 struct raid_set *rs = ti->private;
3325 static int raid_map(struct dm_target *ti, struct bio *bio)
3327 struct raid_set *rs = ti->private;
3328 struct mddev *mddev = &rs->md;
3331 * If we're reshaping to add disk(s), ti->len and
3332 * mddev->array_sectors will differ during the process
3333 * (ti->len > mddev->array_sectors), so we have to requeue
3334 * bios with addresses > mddev->array_sectors here or
3335 * accesses past the EOD of the component data images
3336 * would occur and error the raid set.
3338 if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
3339 return DM_MAPIO_REQUEUE;
3341 md_handle_request(mddev, bio);
3343 return DM_MAPIO_SUBMITTED;
3346 /* Return sync state string for @state */
3347 enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
3348 static const char *sync_str(enum sync_state state)
3350 /* Has to be in above sync_state order! */
3351 static const char *sync_strs[] = {
3352 "frozen",
3353 "reshape",
3354 "resync",
3355 "check",
3356 "repair",
3357 "recover",
3358 "idle"
3359 };
3361 return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
3364 /* Return enum sync_state for @mddev derived from @recovery flags */
3365 static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3367 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
3370 /* The MD sync thread can be done with io or be interrupted but still be running */
3371 if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
3372 (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
3373 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
3374 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
3377 if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
3378 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
3380 if (test_bit(MD_RECOVERY_CHECK, &recovery))
3385 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3388 if (mddev->reshape_position != MaxSector)
3396 * Return status string for @rdev
3398 * Status characters:
3400 * 'D' = Dead/Failed raid set component or raid4/5/6 journal device
3401 * 'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
3402 * 'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
3403 * '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
3405 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
3409 else if (test_bit(Faulty, &rdev->flags))
3411 else if (test_bit(Journal, &rdev->flags))
3412 return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
3413 else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
3414 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
3415 !test_bit(In_sync, &rdev->flags)))
3421 /* Helper to return resync/reshape progress for @rs and to set runtime flags for an in-sync / resyncing raid set */
3422 static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3423 enum sync_state state, sector_t resync_max_sectors)
3426 struct mddev *mddev = &rs->md;
3428 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3429 clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3431 if (rs_is_raid0(rs)) {
3432 r = resync_max_sectors;
3433 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3436 if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
3437 r = mddev->recovery_cp;
3439 r = mddev->curr_resync_completed;
3441 if (state == st_idle && r >= resync_max_sectors) {
3445 /* In case we have finished recovering, the array is in sync. */
3446 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3447 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3449 } else if (state == st_recover)
3451 * In case we are recovering, the array is not in sync
3452 * and health chars should show the recovering legs.
3454 * Already retrieved recovery offset from curr_resync_completed above.
3458 else if (state == st_resync || state == st_reshape)
3460 * If "resync/reshape" is occurring, the raid set
3461 * is or may be out of sync hence the health
3462 * characters shall be 'a'.
3464 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3466 else if (state == st_check || state == st_repair)
3468 * If "check" or "repair" is occurring, the raid set has
3469 * undergone an initial sync and the health characters
3470 * should not be 'a' anymore.
3472 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3474 else if (test_bit(MD_RECOVERY_NEEDED, &recovery))
3476 * We are idle and recovery is needed, prevent 'A' chars race
3477 * caused by components still set to in-sync by constructor.
3479 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3483 * We are idle and the raid set may be doing an initial
3484 * sync, or it may be rebuilding individual components.
3485 * If all the devices are In_sync, then it is the raid set
3486 * that is being initialized.
3488 struct md_rdev *rdev;
3490 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3491 rdev_for_each(rdev, mddev)
3492 if (!test_bit(Journal, &rdev->flags) &&
3493 !test_bit(In_sync, &rdev->flags)) {
3494 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3500 return min(r, resync_max_sectors);
3503 /* Helper to return @dev name or "-" if !@dev */
3504 static const char *__get_dev_name(struct dm_dev *dev)
3506 return dev ? dev->name : "-";
3509 static void raid_status(struct dm_target *ti, status_type_t type,
3510 unsigned int status_flags, char *result, unsigned int maxlen)
3512 struct raid_set *rs = ti->private;
3513 struct mddev *mddev = &rs->md;
3514 struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL;
3515 int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
3516 unsigned long recovery;
3517 unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
3518 unsigned int sz = 0;
3519 unsigned int rebuild_writemostly_count = 0;
3520 sector_t progress, resync_max_sectors, resync_mismatches;
3521 enum sync_state state;
3522 struct raid_type *rt;
3525 case STATUSTYPE_INFO:
3526 /* *Should* always succeed */
3527 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3531 DMEMIT("%s %d ", rt->name, mddev->raid_disks);
3533 /* Access most recent mddev properties for status output */
3535 /* Get sensible max sectors even if raid set not yet started */
3536 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
3537 mddev->resync_max_sectors : mddev->dev_sectors;
3538 recovery = rs->md.recovery;
3539 state = decipher_sync_action(mddev, recovery);
3540 progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
3541 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3542 atomic64_read(&mddev->resync_mismatches) : 0;
3544 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
3545 for (i = 0; i < rs->raid_disks; i++)
3546 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
3549 * In-sync/Reshape ratio:
3550 * The in-sync ratio shows the progress of:
3551 * - Initializing the raid set
3552 * - Rebuilding a subset of devices of the raid set
3553 * The user can distinguish between the two by referring
3554 * to the status characters.
3556 * The reshape ratio shows the progress of
3557 * changing the raid layout or the number of
3558 * disks of a raid set
3560 DMEMIT(" %llu/%llu", (unsigned long long) progress,
3561 (unsigned long long) resync_max_sectors);
3567 * See Documentation/admin-guide/device-mapper/dm-raid.rst for
3568 * information on each of these states.
3570 DMEMIT(" %s", sync_str(state));
3575 * resync_mismatches/mismatch_cnt
3576 * This field shows the number of discrepancies found when
3577 * performing a "check" of the raid set.
3579 DMEMIT(" %llu", (unsigned long long) resync_mismatches);
3584 * data_offset (needed for out of space reshaping)
3585 * This field shows the data offset into the data
3586 * image LV where the first stripes data starts.
3588 * We keep data_offset equal on all raid disks of the set,
3589 * so retrieving it from the first raid disk is sufficient.
3591 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
3596 DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
3597 __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
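/*
 * Illustrative target-status portion of a STATUSTYPE_INFO line for a
 * 3-device raid5_ls set mid-resync (all values made up):
 *
 *	raid5_ls 3 aaa 1024000/2097152 resync 0 0 -
 *
 * i.e. raid type, device count, health chars, in-sync ratio, sync
 * action, mismatch count, data offset and journal state, in order.
 */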
3600 case STATUSTYPE_TABLE:
3601 /* Report the table line string you would use to construct this raid set */
3604 * Count any rebuild or writemostly argument pairs and subtract the
3605 * hweight count added below for the rebuild and writemostly ctr flags themselves.
3607 for (i = 0; i < rs->raid_disks; i++) {
3608 rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
3609 (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
3611 rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
3612 (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0);
3613 /* Calculate the raid parameter count from the rebuild/writemostly argument counts above and the ctr flags set. */
3614 raid_param_cnt += rebuild_writemostly_count +
3615 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
3616 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
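/*
 * E.g. (illustrative, assuming "rebuild" and "region_size" are one-arg
 * options): "sync" plus "region_size 8192" plus one "rebuild <idx>"
 * pair gives raid_param_cnt = 1 (chunksize) + 1 + 2 + 2 = 6.
 */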
3617 /* Emit table line */
3618 /* This has to be in the documented order for userspace! */
3619 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
3620 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
3621 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
3622 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
3623 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
3624 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags))
3625 for (i = 0; i < rs->raid_disks; i++)
3626 if (test_bit(i, (void *) rs->rebuild_disks))
3627 DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i);
3628 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
3629 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
3630 mddev->bitmap_info.daemon_sleep);
3631 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
3632 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
3633 mddev->sync_speed_min);
3634 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
3635 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
3636 mddev->sync_speed_max);
3637 if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags))
3638 for (i = 0; i < rs->raid_disks; i++)
3639 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3640 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
3641 rs->dev[i].rdev.raid_disk);
3642 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
3643 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
3644 mddev->bitmap_info.max_write_behind);
3645 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
3646 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
3648 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
3649 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
3650 (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
3651 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
3652 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
3653 raid10_md_layout_to_copies(mddev->layout));
3654 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
3655 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
3656 raid10_md_layout_to_format(mddev->layout));
3657 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
3658 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
3659 max(rs->delta_disks, mddev->delta_disks));
3660 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
3661 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
3662 (unsigned long long) rs->data_offset);
3663 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
3664 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
3665 __get_dev_name(rs->journal_dev.dev));
3666 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags))
3667 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE),
3668 md_journal_mode_to_dm_raid(rs->journal_dev.mode));
3669 DMEMIT(" %d", rs->raid_disks);
3670 for (i = 0; i < rs->raid_disks; i++)
3671 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
3672 __get_dev_name(rs->dev[i].data_dev));
3675 case STATUSTYPE_IMA:
3676 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3680 DMEMIT_TARGET_NAME_VERSION(ti->type);
3681 DMEMIT(",raid_type=%s,raid_disks=%d", rt->name, mddev->raid_disks);
3683 /* Access most recent mddev properties for status output */
3685 recovery = rs->md.recovery;
3686 state = decipher_sync_action(mddev, recovery);
3687 DMEMIT(",raid_state=%s", sync_str(state));
3689 for (i = 0; i < rs->raid_disks; i++) {
3690 DMEMIT(",raid_device_%d_status=", i);
3691 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
3694 if (rt_is_raid456(rt)) {
3695 DMEMIT(",journal_dev_mode=");
3696 switch (rs->journal_dev.mode) {
3697 case R5C_JOURNAL_MODE_WRITE_THROUGH:
3699 _raid456_journal_mode[R5C_JOURNAL_MODE_WRITE_THROUGH].param);
3701 case R5C_JOURNAL_MODE_WRITE_BACK:
3703 _raid456_journal_mode[R5C_JOURNAL_MODE_WRITE_BACK].param);
3715 static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
3716 char *result, unsigned int maxlen)
3718 struct raid_set *rs = ti->private;
3719 struct mddev *mddev = &rs->md;
3721 if (!mddev->pers || !mddev->pers->sync_request)
3724 if (!strcasecmp(argv[0], "frozen"))
3725 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3727 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3729 if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
3730 if (mddev->sync_thread) {
3731 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3732 md_unregister_thread(&mddev->sync_thread);
3733 md_reap_sync_thread(mddev);
3735 } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
3737 else if (!strcasecmp(argv[0], "resync"))
3738 ; /* MD_RECOVERY_NEEDED set below */
3739 else if (!strcasecmp(argv[0], "recover"))
3740 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3742 if (!strcasecmp(argv[0], "check")) {
3743 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3744 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3745 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3746 } else if (!strcasecmp(argv[0], "repair")) {
3747 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3748 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3752 if (mddev->ro == 2) {
3753 /* A write to sync_action is enough to justify
3754 * canceling read-auto mode
3757 if (!mddev->suspended && mddev->sync_thread)
3758 md_wakeup_thread(mddev->sync_thread);
3760 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3761 if (!mddev->suspended && mddev->thread)
3762 md_wakeup_thread(mddev->thread);
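/*
 * Example usage from userspace (device name made up):
 *
 *	dmsetup message my_raid 0 check		# start a scrubbing pass
 *	dmsetup message my_raid 0 idle		# return to idle
 */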
3767 static int raid_iterate_devices(struct dm_target *ti,
3768 iterate_devices_callout_fn fn, void *data)
3770 struct raid_set *rs = ti->private;
3774 for (i = 0; !r && i < rs->raid_disks; i++) {
3775 if (rs->dev[i].data_dev) {
3776 r = fn(ti, rs->dev[i].data_dev,
3777 0, /* No offset on data devs */
3778 rs->md.dev_sectors, data);
3785 static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
3787 struct raid_set *rs = ti->private;
3788 unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
3790 blk_limits_io_min(limits, chunk_size_bytes);
3791 blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
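/*
 * Example with illustrative numbers, assuming mddev_data_stripes()
 * returns the number of data stripes: a 4-device raid5 set with
 * 128-sector (64 KiB) chunks gets io_min = 64 KiB and
 * io_opt = 3 * 64 KiB = 192 KiB, i.e. one full data stripe.
 */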
3794 static void raid_postsuspend(struct dm_target *ti)
3796 struct raid_set *rs = ti->private;
3798 if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
3799 /* Writes have to be stopped before suspending to avoid deadlocks. */
3800 if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
3801 md_stop_writes(&rs->md);
3803 mddev_lock_nointr(&rs->md);
3804 mddev_suspend(&rs->md);
3805 mddev_unlock(&rs->md);
3809 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
3812 uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
3813 unsigned long flags;
3814 bool cleared = false;
3815 struct dm_raid_superblock *sb;
3816 struct mddev *mddev = &rs->md;
3819 /* RAID personalities have to provide hot add/remove methods or we need to bail out. */
3820 if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
3823 memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
3825 for (i = 0; i < rs->raid_disks; i++) {
3826 r = &rs->dev[i].rdev;
3827 /* HM FIXME: enhance journal device recovery processing */
3828 if (test_bit(Journal, &r->flags))
3831 if (test_bit(Faulty, &r->flags) &&
3832 r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
3833 DMINFO("Faulty %s device #%d has readable super block."
3834 " Attempting to revive it.",
3835 rs->raid_type->name, i);
3838 * Faulty bit may be set, but sometimes the array can
3839 * be suspended before the personalities can respond
3840 * by removing the device from the array (i.e. calling
3841 * 'hot_remove_disk'). If they haven't yet removed
3842 * the failed device, its 'raid_disk' number will be
3843 * '>= 0' - meaning we must call this function
3844 * ourselves.
3847 clear_bit(In_sync, &r->flags); /* Mandatory for hot remove. */
3848 if (r->raid_disk >= 0) {
3849 if (mddev->pers->hot_remove_disk(mddev, r)) {
3850 /* Failed to revive this device, try next */
3855 r->raid_disk = r->saved_raid_disk = i;
3857 clear_bit(Faulty, &r->flags);
3858 clear_bit(WriteErrorSeen, &r->flags);
3860 if (mddev->pers->hot_add_disk(mddev, r)) {
3861 /* Failed to revive this device, try next */
3862 r->raid_disk = r->saved_raid_disk = -1;
3865 clear_bit(In_sync, &r->flags);
3866 r->recovery_offset = 0;
3867 set_bit(i, (void *) cleared_failed_devices);
3873 /* If any failed devices could be cleared, update all sbs failed_devices bits */
3875 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
3877 rdev_for_each(r, &rs->md) {
3878 if (test_bit(Journal, &r->flags))
3881 sb = page_address(r->sb_page);
3882 sb_retrieve_failed_devices(sb, failed_devices);
3884 for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
3885 failed_devices[i] &= ~cleared_failed_devices[i];
3887 sb_update_failed_devices(sb, failed_devices);
3892 static int __load_dirty_region_bitmap(struct raid_set *rs)
3896 /* Try loading the bitmap unless "raid0", which does not have one */
3897 if (!rs_is_raid0(rs) &&
3898 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
3899 r = md_bitmap_load(&rs->md);
3901 DMERR("Failed to load bitmap");
3907 /* Enforce updating all superblocks */
3908 static void rs_update_sbs(struct raid_set *rs)
3910 struct mddev *mddev = &rs->md;
3913 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3915 md_update_sb(mddev, 1);
3920 * Reshape changes the raid algorithm of @rs to a new one within its
3921 * personality (e.g. raid6_zr -> raid6_nc), changes the stripe size,
3922 * or adds/removes disks, thus growing/shrinking or resizing the set
3924 * Call mddev_lock_nointr() before!
3926 static int rs_start_reshape(struct raid_set *rs)
3929 struct mddev *mddev = &rs->md;
3930 struct md_personality *pers = mddev->pers;
3932 /* Don't allow the sync thread to work until the table gets reloaded. */
3933 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
3935 r = rs_setup_reshape(rs);
3940 * Check any reshape constraints enforced by the personality
3942 * May as well already kick the reshape off so that pers->start_reshape() becomes optional.
3944 r = pers->check_reshape(mddev);
3946 rs->ti->error = "pers->check_reshape() failed";
3951 * Personality may not provide a start reshape method, in which
3952 * case check_reshape above has already covered everything
3954 if (pers->start_reshape) {
3955 r = pers->start_reshape(mddev);
3957 rs->ti->error = "pers->start_reshape() failed";
3963 * Now that the reshape is set up, update the superblocks to
3964 * reflect that fact so that a table reload will
3965 * access proper superblock content in the ctr.
3972 static int raid_preresume(struct dm_target *ti)
3975 struct raid_set *rs = ti->private;
3976 struct mddev *mddev = &rs->md;
3978 /* This is a resume after a suspend of the set -> it's already started. */
3979 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
3983 * The superblocks need to be updated on disk if the
3984 * array is new or new devices got added (thus zeroed
3985 * out by userspace); otherwise __load_dirty_region_bitmap
3986 * will overwrite them in core with old data or fail.
3988 if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
3991 /* Load the bitmap from disk unless raid0 */
3992 r = __load_dirty_region_bitmap(rs);
3996 /* We are extending the raid set size, adjust mddev/md_rdev sizes and set capacity. */
3997 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
3998 mddev->array_sectors = rs->array_sectors;
3999 mddev->dev_sectors = rs->dev_sectors;
4000 rs_set_rdev_sectors(rs);
4001 rs_set_capacity(rs);
4004 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
4005 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
4006 (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
4007 (rs->requested_bitmap_chunk_sectors &&
4008 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
4009 int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
4011 r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
4013 DMERR("Failed to resize bitmap");
4016 /* Check for any resize/reshape on @rs and adjust/initiate */
4017 /* Be prepared for mddev_resume() in raid_resume() */
4018 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4019 if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
4020 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4021 mddev->resync_min = mddev->recovery_cp;
4022 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
4023 mddev->resync_max_sectors = mddev->dev_sectors;
4026 /* Check for any reshape request unless new raid set */
4027 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
4028 /* Initiate a reshape. */
4029 rs_set_rdev_sectors(rs);
4030 mddev_lock_nointr(mddev);
4031 r = rs_start_reshape(rs);
4032 mddev_unlock(mddev);
4034 DMWARN("Failed to check/start reshape, continuing without change");
4041 static void raid_resume(struct dm_target *ti)
4043 struct raid_set *rs = ti->private;
4044 struct mddev *mddev = &rs->md;
4046 if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
4048 * A secondary resume while the device is active.
4049 * Take this opportunity to check whether any failed
4050 * devices are reachable again.
4052 attempt_restore_of_faulty_devices(rs);
4055 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
4056 /* Only reduce raid set size before running a disk-removing reshape. */
4057 if (mddev->delta_disks < 0)
4058 rs_set_capacity(rs);
4060 mddev_lock_nointr(mddev);
4061 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4064 mddev_resume(mddev);
4065 mddev_unlock(mddev);
4069 static struct target_type raid_target = {
4071 .version = {1, 15, 1},
4072 .module = THIS_MODULE,
4076 .status = raid_status,
4077 .message = raid_message,
4078 .iterate_devices = raid_iterate_devices,
4079 .io_hints = raid_io_hints,
4080 .postsuspend = raid_postsuspend,
4081 .preresume = raid_preresume,
4082 .resume = raid_resume,
4085 static int __init dm_raid_init(void)
4087 DMINFO("Loading target version %u.%u.%u",
4088 raid_target.version[0],
4089 raid_target.version[1],
4090 raid_target.version[2]);
4091 return dm_register_target(&raid_target);
4094 static void __exit dm_raid_exit(void)
4096 dm_unregister_target(&raid_target);
4099 module_init(dm_raid_init);
4100 module_exit(dm_raid_exit);
4102 module_param(devices_handle_discard_safely, bool, 0644);
4103 MODULE_PARM_DESC(devices_handle_discard_safely,
4104 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
4106 MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
4107 MODULE_ALIAS("dm-raid0");
4108 MODULE_ALIAS("dm-raid1");
4109 MODULE_ALIAS("dm-raid10");
4110 MODULE_ALIAS("dm-raid4");
4111 MODULE_ALIAS("dm-raid5");
4112 MODULE_ALIAS("dm-raid6");
4115 MODULE_LICENSE("GPL");