struct dm_snapshot *s = pe->snap;
mempool_free(pe, s->pending_pool);
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&s->pending_exceptions_count);
}
static void merge_shutdown(struct dm_snapshot *s)
{
clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
* Origin: maps a linear range of a device, with hooks for snapshotting.
*/
+ struct dm_origin {
+ struct dm_dev *dev;
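+ /* minimum snapshot chunk size, in sectors; origin writes are split at this boundary */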
+ unsigned split_boundary;
+ };
+
/*
* Construct an origin mapping: <dev_path>
* The context for an origin is merely a 'struct dm_dev *'
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
- struct dm_dev *dev;
+ struct dm_origin *o;
if (argc != 1) {
ti->error = "origin: incorrect number of arguments";
return -EINVAL;
}
- r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
+ o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
+ if (!o) {
+ ti->error = "Cannot allocate private origin structure";
+ r = -ENOMEM;
+ goto bad_alloc;
+ }
+
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
if (r) {
ti->error = "Cannot get target device";
- return r;
+ goto bad_open;
}
- ti->private = dev;
+ ti->private = o;
ti->num_flush_bios = 1;
return 0;
+
+ bad_open:
+ kfree(o);
+ bad_alloc:
+ return r;
}
static void origin_dtr(struct dm_target *ti)
{
- struct dm_dev *dev = ti->private;
- dm_put_device(ti, dev);
+ struct dm_origin *o = ti->private;
+ dm_put_device(ti, o->dev);
+ kfree(o);
}
static int origin_map(struct dm_target *ti, struct bio *bio)
{
- struct dm_dev *dev = ti->private;
- bio->bi_bdev = dev->bdev;
+ struct dm_origin *o = ti->private;
+ unsigned available_sectors;
- if (bio->bi_rw & REQ_FLUSH)
+ bio->bi_bdev = o->dev->bdev;
+
+ if (unlikely(bio->bi_rw & REQ_FLUSH))
return DM_MAPIO_REMAPPED;
+ if (bio_rw(bio) != WRITE)
+ return DM_MAPIO_REMAPPED;
+
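+ /* sectors from bi_sector up to the next split_boundary multiple; the mask relies on split_boundary being a power of two */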
+ available_sectors = o->split_boundary -
+ ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
+
+ if (bio_sectors(bio) > available_sectors)
+ dm_accept_partial_bio(bio, available_sectors);
+
/* Only tell snapshots if this is a write */
- return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
+ return do_origin(o->dev, bio);
}
/*
*/
static void origin_resume(struct dm_target *ti)
{
- struct dm_dev *dev = ti->private;
+ struct dm_origin *o = ti->private;
- ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
+ o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
}
static void origin_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
- struct dm_dev *dev = ti->private;
+ struct dm_origin *o = ti->private;
switch (type) {
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
- snprintf(result, maxlen, "%s", dev->name);
+ snprintf(result, maxlen, "%s", o->dev->name);
break;
}
}
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
struct bio_vec *biovec, int max_size)
{
- struct dm_dev *dev = ti->private;
- struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct dm_origin *o = ti->private;
+ struct request_queue *q = bdev_get_queue(o->dev->bdev);
if (!q->merge_bvec_fn)
return max_size;
- bvm->bi_bdev = dev->bdev;
+ bvm->bi_bdev = o->dev->bdev;
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
static int origin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
- struct dm_dev *dev = ti->private;
+ struct dm_origin *o = ti->private;
- return fn(ti, dev, 0, ti->len, data);
+ return fn(ti, o->dev, 0, ti->len, data);
}
static struct target_type origin_target = {
}
}
+ static void disable_write_same(struct mapped_device *md)
+ {
+ struct queue_limits *limits = dm_get_queue_limits(md);
+
+ /* device doesn't really support WRITE SAME, disable it */
+ limits->max_write_same_sectors = 0;
+ }
+
static void clone_endio(struct bio *bio, int error)
{
int r = 0;
}
}
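+ /*
+ * A WRITE SAME failed and the underlying queue has since zeroed its
+ * max_write_same_sectors, so stop advertising WRITE SAME on this
+ * mapped device as well.
+ */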
+ if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
+ !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
+ disable_write_same(md);
+
free_tio(md, tio);
dec_pending(io, error);
}
r = rq_end_io(tio->ti, clone, error, &tio->info);
}
+ if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
+ !clone->q->limits.max_write_same_sectors))
+ disable_write_same(tio->md);
+
if (r <= 0)
/* The target wants to complete the I/O */
dm_end_request(clone, r);
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
+ /*
+ * A target may call dm_accept_partial_bio only from the map routine. It is
+ * allowed for all bio types except REQ_FLUSH.
+ *
+ * dm_accept_partial_bio informs the dm core that the target only wants to
+ * process an additional n_sectors sectors of the bio; the rest of the data
+ * will be sent in a subsequent bio.
+ *
+ * A diagram that explains the arithmetic:
+ * +--------------------+---------------+-------+
+ * |         1          |       2       |   3   |
+ * +--------------------+---------------+-------+
+ *
+ * <-------------- *tio->len_ptr --------------->
+ *                      <------- bi_size ------->
+ *                      <-- n_sectors -->
+ *
+ * Region 1 was already iterated over with bio_advance or a similar function.
+ * (it may be empty if the target doesn't use bio_advance)
+ * Region 2 is the remaining bio size that the target wants to process.
+ * (it may be empty if region 1 is non-empty, although there is no reason
+ * to make it empty)
+ * The target requires that region 3 be sent in the next bio.
+ *
+ * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
+ * the partially processed part (the sum of regions 1+2) must be the same for all
+ * copies of the bio.
+ */
+ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+ {
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+ BUG_ON(bio->bi_rw & REQ_FLUSH);
+ BUG_ON(bi_size > *tio->len_ptr);
+ BUG_ON(n_sectors > bi_size);
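+ /*
+ * Trim the length shared by all clones of this bio so the dm core only
+ * advances past the accepted sectors; the remainder (region 3) is sent
+ * in the next bio.
+ */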
+ *tio->len_ptr -= bi_size - n_sectors;
+ bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
+ }
+ EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+
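As an illustration (not part of the patch), a minimal sketch of how a bio-based target's map routine might use dm_accept_partial_bio; the target type, its fields and the power-of-two split_size boundary are hypothetical, mirroring the origin_map() change above:

struct example_target {
	struct dm_dev *dev;
	unsigned split_size;	/* hypothetical power-of-two boundary, in sectors */
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_target *et = ti->private;
	unsigned remaining;

	bio->bi_bdev = et->dev->bdev;

	/* dm_accept_partial_bio may not be used on REQ_FLUSH bios */
	if (unlikely(bio->bi_rw & REQ_FLUSH))
		return DM_MAPIO_REMAPPED;

	/* sectors left before the next split_size-aligned offset */
	remaining = et->split_size -
		((unsigned)bio->bi_iter.bi_sector & (et->split_size - 1));

	/*
	 * Accept only 'remaining' sectors now; the dm core trims the bio
	 * and resubmits the rest to this map routine as a new bio.
	 */
	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);

	return DM_MAPIO_REMAPPED;
}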
static void __map_bio(struct dm_target_io *tio)
{
int r;
struct bio *bio;
struct dm_io *io;
sector_t sector;
- sector_t sector_count;
+ unsigned sector_count;
};
- static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
+ static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
bio->bi_iter.bi_sector = sector;
bio->bi_iter.bi_size = to_bytes(len);
static void __clone_and_map_simple_bio(struct clone_info *ci,
struct dm_target *ti,
- unsigned target_bio_nr, sector_t len)
+ unsigned target_bio_nr, unsigned *len)
{
struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
struct bio *clone = &tio->clone;
+ tio->len_ptr = len;
+
/*
* Discard requests require the bio's inline iovecs be initialized.
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
*/
__bio_clone_fast(clone, ci->bio);
if (len)
- bio_setup_sector(clone, ci->sector, len);
+ bio_setup_sector(clone, ci->sector, *len);
__map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned num_bios, sector_t len)
+ unsigned num_bios, unsigned *len)
{
unsigned target_bio_nr;
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
- __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
+ __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
return 0;
}
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned len)
+ sector_t sector, unsigned *len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
tio = alloc_tio(ci, ti, 0, target_bio_nr);
- clone_bio(tio, bio, sector, len);
+ tio->len_ptr = len;
+ clone_bio(tio, bio, sector, *len);
__map_bio(tio);
}
}
is_split_required_fn is_split_required)
{
struct dm_target *ti;
- sector_t len;
+ unsigned len;
unsigned num_bios;
do {
return -EOPNOTSUPP;
if (is_split_required && !is_split_required(ti))
- len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
else
- len = min(ci->sector_count, max_io_len(ci->sector, ti));
+ len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
- __send_duplicate_bios(ci, ti, num_bios, len);
+ __send_duplicate_bios(ci, ti, num_bios, &len);
ci->sector += len;
} while (ci->sector_count -= len);
len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
- __clone_and_map_data_bio(ci, ti, ci->sector, len);
+ __clone_and_map_data_bio(ci, ti, ci->sector, &len);
ci->sector += len;
ci->sector_count -= len;
* just one page.
*/
else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
-
max_size = 0;
out:
clone->cmd = rq->cmd;
clone->cmd_len = rq->cmd_len;
clone->sense = rq->sense;
- clone->buffer = rq->buffer;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
static void dm_queue_flush(struct mapped_device *md)
{
clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
queue_work(md->wq, &md->work);
}