/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k?"/":"",
			       bdevname(conf->devlist[j*raid_disks
						      + k]->bdev, b));
		printk(KERN_CONT "]\n");

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
				"device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned short blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			printk(KERN_ERR
			       "md/raid0:%s: remove inactive devices before converting to RAID0\n",
			       mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

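/*
 * Illustrative zone layout (hypothetical sizes, not taken from any
 * real array): two members of 100 and 150 chunks yield two strip
 * zones.  Zone 0 spans both devices up to the smaller size and holds
 * 100 * 2 = 200 chunks striped across the pair; zone 1 starts at
 * dev_start = 100 chunks into the larger device and holds its
 * remaining 50 chunks alone.
 */
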
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. we separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

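/*
 * Worked example for map_sector() (hypothetical numbers): with 64KiB
 * chunks (chunk_sects = 128) and a 3-device zone, a zone-relative
 * offset of 1000 sectors sits 1000 & 127 = 104 sectors into stripe
 * chunk 1000 >> 7 = 7.  That chunk lands on device 7 % 3 = 1, as
 * device-local chunk 1000 / (3 * 128) = 2, so *sector_offset becomes
 * 2 * 128 + 104 = 360; the caller then adds zone->dev_start and the
 * rdev's data_offset to get the final on-disk sector.
 */
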
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

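/*
 * Size arithmetic sketch (hypothetical numbers): with chunk_sects =
 * 128 (64KiB chunks), members of 1000 and 1500 sectors are rounded
 * down to chunk multiples of 896 and 1408 sectors, so the array
 * advertises 896 + 1408 = 2304 sectors.
 */
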
static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe. i.e. number of devices
		 * multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants. We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Is the io distributed over 1 or more chunks ?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}

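/*
 * Boundary-check example (hypothetical numbers): with chunk_sects =
 * 128, a bio starting at sector 120 has only 128 - (120 & 127) = 8
 * sectors left in its chunk, so an 8-sector bio passes the test above
 * while a 16-sector bio does not and must be split by the caller.
 */
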
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	struct bio *split;

	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	do {
		sector_t sector = bio->bi_iter.bi_sector;
		unsigned chunk_sects = mddev->chunk_sectors;

		unsigned sectors = chunk_sects -
			(likely(is_power_of_2(chunk_sects))
			 ? (sector & (chunk_sects-1))
			 : sector_div(sector, chunk_sects));

		/* Restore due to sector_div */
		sector = bio->bi_iter.bi_sector;

		if (sectors < bio_sectors(bio)) {
			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		zone = find_zone(mddev->private, &sector);
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		split->bi_bdev = tmp_dev->bdev;
		split->bi_iter.bi_sector = sector + zone->dev_start +
			tmp_dev->data_offset;

		if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
			/* Just ignore it */
			bio_endio(split);
		} else
			generic_make_request(split);
	} while (split != bio);
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

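/*
 * Note on the layout test above: md's raid10 layout packs near_copies
 * into the low byte and far_copies into the next byte, so the required
 * value (1 << 8) + 2 (0x102) reads as "near_copies = 2, far_copies = 1",
 * i.e. the plain n2 mirrored arrangement this takeover path expects.
 */
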
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

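/*
 * Chunk-size derivation example (hypothetical numbers): starting from
 * the 64KiB default (chunksect = 128), an array of 1000000 sectors is
 * not a multiple of 128 but is a multiple of 64, so the loop above
 * settles on chunksect = 64 (32KiB).  Had chunksect shrunk below
 * PAGE_SIZE, the takeover would have been rejected instead.
 */
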
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
		       mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");