linux.git/commitdiff
Merge tag 'for-4.13/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <[email protected]>
Thu, 6 Jul 2017 18:54:56 +0000 (11:54 -0700)
committer Linus Torvalds <[email protected]>
Thu, 6 Jul 2017 18:54:56 +0000 (11:54 -0700)
Pull device mapper updates from Mike Snitzer:

 - Add the ability to use select() or poll() on /dev/mapper/control to
   wait for events from multiple DM devices (a userspace sketch follows
   the shortlog below).

 - Convert DM's printk macros to the standard pr_<level> macros.

 - Add a big-endian variant of plain64 IV to dm-crypt.

 - Add support for zoned (aka SMR) devices to DM core. DM kcopyd was
   also improved to provide the sequential write feature needed by
   zoned devices (see the kcopyd sketch after this list).

 - Introduce a DM zoned target that provides support for host-managed
   zoned devices; the resulting dm-zoned device acts as a drive-managed
   interface to the underlying host-managed device.

 - A DM raid fix to avoid using BUG() for error handling.
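
As a rough illustration of the kcopyd improvement above: a zoned target
asks kcopyd for in-order destination writes when copying into a
sequential zone. This is a minimal sketch, assuming the
DM_KCOPYD_WRITE_SEQ flag and the 4.13-era dm_kcopyd_copy() signature as
recalled from include/linux/dm-kcopyd.h; names may differ from the
merged code:

#include <linux/dm-kcopyd.h>

/*
 * Hedged sketch: copy one region into a sequential zone. Setting
 * DM_KCOPYD_WRITE_SEQ (assumed flag name) makes kcopyd issue the
 * destination writes strictly in sector order, as host-managed
 * zones require.
 */
static int copy_to_seq_zone(struct dm_kcopyd_client *kc,
			    struct dm_io_region *src,
			    struct dm_io_region *dst,
			    dm_kcopyd_notify_fn done_fn, void *ctx)
{
	unsigned long flags = 0;

	set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
	return dm_kcopyd_copy(kc, src, 1 /* one destination */, dst,
			      flags, done_fn, ctx);
}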

* tag 'for-4.13/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm zoned: fix overflow when converting zone ID to sectors
  dm raid: stop using BUG() in __rdev_sectors()
  dm zoned: drive-managed zoned block device target
  dm kcopyd: add sequential write feature
  dm linear: add support for zoned block devices
  dm flakey: add support for zoned block devices
  dm: introduce dm_remap_zone_report()
  dm: fix REQ_OP_ZONE_REPORT bio handling
  dm: fix REQ_OP_ZONE_RESET bio handling
  dm table: add zoned block devices validation
  dm: convert DM printk macros to pr_<level> macros
  dm crypt: add big-endian variant of plain64 IV
  dm bio prison: use rb_entry() rather than container_of()
  dm ioctl: report event number in DM_LIST_DEVICES
  dm ioctl: add a new DM_DEV_ARM_POLL ioctl
  dm: add basic support for using the select or poll function
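
Tying the last three shortlog entries together from userspace: a
process arms the control fd with DM_DEV_ARM_POLL to snapshot the
current global event number, sleeps in poll() or select(), and on
wakeup issues DM_LIST_DEVICES (which now reports per-device event
numbers) to find the devices that changed. A minimal sketch, assuming
only the uapi names visible in the shortlog; struct dm_ioctl setup is
abbreviated to the required version and size fields:

#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

/* Hedged sketch: block until any DM device posts an event. */
static int wait_for_dm_event(void)
{
	struct dm_ioctl dmi;
	struct pollfd pfd;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&dmi, 0, sizeof(dmi));
	dmi.version[0] = DM_VERSION_MAJOR;
	dmi.version[1] = DM_VERSION_MINOR;
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	dmi.data_size = sizeof(dmi);

	/* Snapshot the global event number for this open fd. */
	if (ioctl(fd, DM_DEV_ARM_POLL, &dmi) < 0) {
		close(fd);
		return -1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) < 0) {
		close(fd);
		return -1;
	}

	/*
	 * A real caller would now issue DM_LIST_DEVICES and compare the
	 * reported per-device event numbers with its own records.
	 */
	close(fd);
	return 0;
}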

drivers/md/dm-raid.c
drivers/md/dm.c

diff --combined drivers/md/dm-raid.c
index b4b75dad816ad95c0028f1b64b2be73e8993caac,67b3eb23e771854e1dd6514a63ff4471e27e0396..2e10c2f13a34986c9d8d445e1b00ea94261b2628
@@@ -1571,7 -1571,7 +1571,7 @@@ static sector_t __rdev_sectors(struct r
                        return rdev->sectors;
        }
  
-       BUG(); /* Constructor ensures we got some. */
+       return 0;
  }
  
  /* Calculate the sectors per device and per array used for @rs */
@@@ -1927,7 -1927,7 +1927,7 @@@ struct dm_raid_superblock 
        /********************************************************************
         * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
         *
 -       * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
 +       * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
         */
  
        __le32 flags; /* Flags defining array states for reshaping */
@@@ -2092,11 -2092,6 +2092,11 @@@ static void super_sync(struct mddev *md
        sb->layout = cpu_to_le32(mddev->layout);
        sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
  
 +      /********************************************************************
 +       * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
 +       *
 +       * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
 +       */
        sb->new_level = cpu_to_le32(mddev->new_level);
        sb->new_layout = cpu_to_le32(mddev->new_layout);
        sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
@@@ -2443,14 -2438,8 +2443,14 @@@ static int super_validate(struct raid_s
        mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
  
        if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
 -              /* Retrieve device size stored in superblock to be prepared for shrink */
 -              rdev->sectors = le64_to_cpu(sb->sectors);
 +              /*
 +               * Retrieve rdev size stored in superblock to be prepared for shrink.
 +               * Check extended superblock members are present otherwise the size
 +               * will not be set!
 +               */
 +              if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
 +                      rdev->sectors = le64_to_cpu(sb->sectors);
 +
                rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
                if (rdev->recovery_offset == MaxSector)
                        set_bit(In_sync, &rdev->flags);
@@@ -2941,7 -2930,7 +2941,7 @@@ static int raid_ctr(struct dm_target *t
        bool resize;
        struct raid_type *rt;
        unsigned int num_raid_params, num_raid_devs;
-       sector_t calculated_dev_sectors;
+       sector_t calculated_dev_sectors, rdev_sectors;
        struct raid_set *rs = NULL;
        const char *arg;
        struct rs_layout rs_layout;
        if (r)
                goto bad;
  
-       resize = calculated_dev_sectors != __rdev_sectors(rs);
+       rdev_sectors = __rdev_sectors(rs);
+       if (!rdev_sectors) {
+               ti->error = "Invalid rdev size";
+               r = -EINVAL;
+               goto bad;
+       }
+       resize = calculated_dev_sectors != rdev_sectors;
  
        INIT_WORK(&rs->md.event_work, do_table_event);
        ti->private = rs;
diff --combined drivers/md/dm.c
index 40294603530804f121ae2723d43de8b1c892d5d0,96bd13e581cd69c78f61e38ec9364c33109025b4..c2afe7a5755f3f04a9a4d6a029c063e2a5b78326
@@@ -58,6 -58,9 +58,9 @@@ static DECLARE_WORK(deferred_remove_wor
  
  static struct workqueue_struct *deferred_remove_workqueue;
  
+ atomic_t dm_global_event_nr = ATOMIC_INIT(0);
+ DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
  /*
   * One of these is allocated per bio.
   */
@@@ -1009,6 -1012,85 +1012,85 @@@ void dm_accept_partial_bio(struct bio *
  }
  EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
  
+ /*
+  * The zone descriptors obtained with a zone report indicate
+  * zone positions within the target device. The zone descriptors
+  * must be remapped to match their position within the dm device.
+  * A target may call dm_remap_zone_report after completion of a
+  * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
+  * from the target device mapping to the dm device.
+  */
+ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+ {
+ #ifdef CONFIG_BLK_DEV_ZONED
+       struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+       struct bio *report_bio = tio->io->bio;
+       struct blk_zone_report_hdr *hdr = NULL;
+       struct blk_zone *zone;
+       unsigned int nr_rep = 0;
+       unsigned int ofst;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
+       void *addr;
+       if (bio->bi_status)
+               return;
+       /*
+        * Remap the start sector of the reported zones. For sequential zones,
+        * also remap the write pointer position.
+        */
+       bio_for_each_segment(bvec, report_bio, iter) {
+               addr = kmap_atomic(bvec.bv_page);
+               /* Remember the report header in the first page */
+               if (!hdr) {
+                       hdr = addr;
+                       ofst = sizeof(struct blk_zone_report_hdr);
+               } else
+                       ofst = 0;
+               /* Set zones start sector */
+               while (hdr->nr_zones && ofst < bvec.bv_len) {
+                       zone = addr + ofst;
+                       if (zone->start >= start + ti->len) {
+                               hdr->nr_zones = 0;
+                               break;
+                       }
+                       zone->start = zone->start + ti->begin - start;
+                       if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
+                               if (zone->cond == BLK_ZONE_COND_FULL)
+                                       zone->wp = zone->start + zone->len;
+                               else if (zone->cond == BLK_ZONE_COND_EMPTY)
+                                       zone->wp = zone->start;
+                               else
+                                       zone->wp = zone->wp + ti->begin - start;
+                       }
+                       ofst += sizeof(struct blk_zone);
+                       hdr->nr_zones--;
+                       nr_rep++;
+               }
+               if (addr != hdr)
+                       kunmap_atomic(addr);
+               if (!hdr->nr_zones)
+                       break;
+       }
+       if (hdr) {
+               hdr->nr_zones = nr_rep;
+               kunmap_atomic(hdr);
+       }
+       bio_advance(report_bio, report_bio->bi_iter.bi_size);
+ #else /* !CONFIG_BLK_DEV_ZONED */
+       bio->bi_status = BLK_STS_NOTSUPP;
+ #endif
+ }
+ EXPORT_SYMBOL_GPL(dm_remap_zone_report);
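
For example, the shortlog's "dm linear: add support for zoned block
devices" consumes this helper from its end_io hook, roughly as sketched
below. This is a reconstruction rather than a copy of that patch: the
end_io prototype was in flux during the 4.13 window, and linear_c with
its start field is assumed to follow dm-linear's usual layout.

/*
 * Hedged sketch: a linear-style target remaps a completed zone
 * report from the underlying device back into the dm device's
 * sector space. lc->start is the target's offset into the
 * underlying device.
 */
static int linear_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct linear_c *lc = ti->private;

	if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT)
		dm_remap_zone_report(ti, bio, lc->start);

	return DM_ENDIO_DONE;
}
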
  /*
   * Flush current->bio_list when the target map method blocks.
   * This fixes deadlocks in snapshot and possibly in other targets.
@@@ -1149,7 -1231,8 +1231,8 @@@ static int clone_bio(struct dm_target_i
                        return r;
        }
  
-       bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+       if (bio_op(bio) != REQ_OP_ZONE_REPORT)
+               bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
        clone->bi_iter.bi_size = to_bytes(len);
  
        if (unlikely(bio_integrity(bio) != NULL))
@@@ -1338,7 -1421,11 +1421,11 @@@ static int __split_and_process_non_flus
        if (!dm_target_is_valid(ti))
                return -EIO;
  
-       len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
+       if (bio_op(bio) == REQ_OP_ZONE_REPORT)
+               len = ci->sector_count;
+       else
+               len = min_t(sector_t, max_io_len(ci->sector, ti),
+                           ci->sector_count);
  
        r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
        if (r < 0)
@@@ -1381,6 -1468,10 +1468,10 @@@ static void __split_and_process_bio(str
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
                /* dec_pending submits any data associated with flush */
+       } else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
+               ci.bio = bio;
+               ci.sector_count = 0;
+               error = __split_and_process_non_flush(&ci);
        } else {
                ci.bio = bio;
                ci.sector_count = bio_sectors(bio);
@@@ -1534,6 -1625,7 +1625,6 @@@ void dm_init_normal_md_queue(struct map
         * Initialize aspects of queue that aren't relevant for blk-mq
         */
        md->queue->backing_dev_info->congested_fn = dm_any_congested;
 -      blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
  }
  
  static void cleanup_mapped_device(struct mapped_device *md)
@@@ -1759,7 -1851,9 +1850,9 @@@ static void event_callback(void *contex
        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
  
        atomic_inc(&md->event_nr);
+       atomic_inc(&dm_global_event_nr);
        wake_up(&md->eventq);
+       wake_up(&dm_global_eventq);
  }
  
  /*
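
On the consuming side, the counter and waitqueue introduced above back
the control device's new poll method in drivers/md/dm-ioctl.c.
Schematically (a sketch reconstructed from the feature description; the
per-fd bookkeeping structure is an assumption):

/*
 * Hedged sketch: poll method for /dev/mapper/control. Assumes each
 * open file caches, in its private data, the event number last
 * snapshotted by DM_DEV_ARM_POLL.
 */
static unsigned int dm_poll(struct file *filp, poll_table *wait)
{
	struct dm_file *priv = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &dm_global_eventq, wait);

	/* Signed difference keeps counter wraparound harmless. */
	if ((int)(atomic_read(&dm_global_event_nr) -
		  atomic_read(&priv->global_event_nr)) > 0)
		mask |= POLLIN;

	return mask;
}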