#define NR_RESERVED_BUFS 32
-static mdk_personality_t multipath_personality;
-
-
static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
{
struct multipath_bh *mpb;
- mpb = kmalloc(sizeof(*mpb), gfp_flags);
- if (mpb)
- memset(mpb, 0, sizeof(*mpb));
+ mpb = kzalloc(sizeof(*mpb), gfp_flags);
return mpb;
}
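
mp_pool_alloc() is the allocation half of a mempool constructor pair. For context, a minimal sketch of the matching free routine and the pool set-up that multipath_run() performs with these callbacks (NR_RESERVED_BUFS from the top of this file; error handling abbreviated) might look like:

static void mp_pool_free(void *mpb, void *data)
{
	kfree(mpb);
}

	/* in multipath_run(): keep NR_RESERVED_BUFS multipath_bh structures
	 * in reserve so requests can make progress under memory pressure */
	conf->pool = mempool_create(NR_RESERVED_BUFS,
				    mp_pool_alloc, mp_pool_free, NULL);
	if (conf->pool == NULL)
		goto out_free_conf;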
rcu_read_lock();
for (i = 0; i < disks; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
- if (rdev && rdev->in_sync) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+ if (rdev && test_bit(In_sync, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
return i;
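
This only works because the chosen rdev is pinned (nr_pending) before the RCU read-side critical section ends; once the lock is dropped the pointer itself may be cleared and freed. The general idiom, sketched here with a hypothetical slot pointer rather than this file's code:

	rcu_read_lock();
	rdev = rcu_dereference(slot->rdev);	/* may be NULL, or about to go away */
	if (rdev)
		atomic_inc(&rdev->nr_pending);	/* pin before unlocking */
	rcu_read_unlock();

	if (rdev) {
		/* ... safe to submit I/O to rdev->bdev here ... */
		rdev_dec_pending(rdev, mddev);	/* drop the pin when finished */
	}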
rcu_read_lock();
for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
- if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags)
+ && atomic_read(&rdev->nr_pending)) {
request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
multipath_conf_t *conf = mddev_to_conf(mddev);
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
+ const int rw = bio_data_dir(bio);
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
- if (bio_data_dir(bio)==WRITE) {
- disk_stat_inc(mddev->gendisk, writes);
- disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
- } else {
- disk_stat_inc(mddev->gendisk, reads);
- disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
- }
+ disk_stat_inc(mddev->gendisk, ios[rw]);
+ disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
mp_bh->path = multipath_map(conf);
if (mp_bh->path < 0) {
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
conf->multipaths[i].rdev &&
- conf->multipaths[i].rdev->in_sync ? "U" : "_");
+ test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
}
rcu_read_lock();
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = conf->multipaths[i].rdev;
- if (rdev && !rdev->faulty) {
+ mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
request_queue_t *r_queue = bdev_get_queue(bdev);
/*
* Mark disk as unusable
*/
- if (!rdev->faulty) {
+ if (!test_bit(Faulty, &rdev->flags)) {
char b[BDEVNAME_SIZE];
- rdev->in_sync = 0;
- rdev->faulty = 1;
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Faulty, &rdev->flags);
mddev->sb_dirty = 1;
conf->working_disks--;
printk(KERN_ALERT "multipath: IO failure on %s,"
tmp = conf->multipaths + i;
if (tmp->rdev)
printk(" disk%d, o:%d, dev:%s\n",
- i,!tmp->rdev->faulty,
+ i,!test_bit(Faulty, &tmp->rdev->flags),
bdevname(tmp->rdev->bdev,b));
}
}
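
The test_bit()/set_bit()/clear_bit() calls used throughout this patch replace the old int in_sync and faulty members with bits in a single rdev->flags word, so state changes are atomic with respect to the lock-free readers. A sketch of the shape of that field as declared in md_k.h (bit values assumed, shown only for orientation):

	/* in struct mdk_rdev_s */
	unsigned long	flags;
#define	Faulty		1	/* device is known to have a fault */
#define	In_sync		2	/* device is in sync with the rest of the array */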
static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
multipath_conf_t *conf = mddev->private;
+ struct request_queue *q;
int found = 0;
int path;
struct multipath_info *p;
for (path=0; path<mddev->raid_disks; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ q = rdev->bdev->bd_disk->queue;
+ blk_queue_stack_limits(mddev->queue, q);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
* a one page request is never in violation.
* (Note: it is very unlikely that a device with
* merge_bvec_fn will be involved in multipath.)
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+ if (q->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
conf->working_disks++;
rdev->raid_disk = path;
- rdev->in_sync = 1;
- p->rdev = rdev;
+ set_bit(In_sync, &rdev->flags);
+ rcu_assign_pointer(p->rdev, rdev);
found = 1;
}
rdev = p->rdev;
if (rdev) {
- if (rdev->in_sync ||
+ if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
printk(KERN_ERR "hot-remove-disk, slot %d is identified" " but is still operational!\n", number);
err = -EBUSY;
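
The -EBUSY check above is only the first half of the tear-down that the RCU readers depend on; the remainder of multipath_remove_disk() (not part of this hunk, sketched here from the same era of the driver) clears the pointer, waits out existing readers, and re-checks the pin count before the rdev may be freed:

		p->rdev = NULL;
		synchronize_rcu();	/* wait for rcu_dereference() users to finish */
		if (atomic_read(&rdev->nr_pending)) {
			/* a reader pinned the rdev after we cleared the
			 * pointer: put it back and report busy */
			err = -EBUSY;
			p->rdev = rdev;
		}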
* should be freed in multipath_stop()]
*/
- conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL);
+ conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
if (!conf) {
printk(KERN_ERR
mdname(mddev));
goto out;
}
- memset(conf, 0, sizeof(*conf));
- conf->multipaths = kmalloc(sizeof(struct multipath_info)*mddev->raid_disks,
+ conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->multipaths) {
printk(KERN_ERR
mdname(mddev));
goto out_free_conf;
}
- memset(conf->multipaths, 0, sizeof(struct multipath_info)*mddev->raid_disks);
conf->working_disks = 0;
ITERATE_RDEV(mddev,rdev,tmp) {
mddev->queue->max_sectors > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
- if (!rdev->faulty)
+ if (!test_bit(Faulty, &rdev->flags))
conf->working_disks++;
}
return 0;
}
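
The kzalloc() conversions above are behaviour-preserving: kzalloc(size, flags) is kmalloc(size, flags) followed by zeroing of the allocation, so the explicit memset() calls become redundant. For example:

	/* before */
	conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL);
	if (conf)
		memset(conf, 0, sizeof(*conf));

	/* after: same result in one call */
	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);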
-static mdk_personality_t multipath_personality=
+static struct mdk_personality multipath_personality =
{
.name = "multipath",
+ .level = LEVEL_MULTIPATH,
.owner = THIS_MODULE,
.make_request = multipath_make_request,
.run = multipath_run,
static int __init multipath_init (void)
{
- return register_md_personality (MULTIPATH, &multipath_personality);
+ return register_md_personality (&multipath_personality);
}
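
With the switch to struct mdk_personality, registration is keyed by the structure's .level and .name fields on a global list rather than by a fixed personality number, which is why both register and unregister now take the structure itself. The prototypes (as expected in md_k.h after this change):

	int register_md_personality(struct mdk_personality *p);
	int unregister_md_personality(struct mdk_personality *p);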
static void __exit multipath_exit (void)
{
- unregister_md_personality (MULTIPATH);
+ unregister_md_personality (&multipath_personality);
}
module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
+MODULE_ALIAS("md-multipath");
+MODULE_ALIAS("md-level--4");