Git Repo - linux.git/commitdiff
Merge tag 'for-6.0/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device...
author: Linus Torvalds <[email protected]>
Fri, 12 Aug 2022 02:46:48 +0000 (19:46 -0700)
committer: Linus Torvalds <[email protected]>
Fri, 12 Aug 2022 02:46:48 +0000 (19:46 -0700)
Pull device mapper fixes from Mike Snitzer:

 - A few fixes for the DM verity and bufio changes in this merge window

 - A smatch warning fix for DM writecache locking in writecache_map

* tag 'for-6.0/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm bufio: fix some cases where the code sleeps with spinlock held
  dm writecache: fix smatch warning about invalid return from writecache_map
  dm verity: fix verity_parse_opt_args parsing
  dm verity: fix DM_VERITY_OPTS_MAX value yet again
  dm bufio: simplify DM_BUFIO_CLIENT_NO_SLEEP locking

1  2 
drivers/md/dm-bufio.c
drivers/md/dm-verity-target.c
drivers/md/dm-writecache.c

diff --combined drivers/md/dm-bufio.c
index acd6d6b474345dbbd8a773ee227979817d0b8aba,799915220e6cc3ed42414cdbf052158737dee77a..09c7ed2650ca4250204f65bac391b0fbb66a2946
@@@ -83,7 -83,7 +83,7 @@@
  struct dm_bufio_client {
        struct mutex lock;
        spinlock_t spinlock;
-       unsigned long spinlock_flags;
+       bool no_sleep;
  
        struct list_head lru[LIST_SIZE];
        unsigned long n_buffers[LIST_SIZE];
@@@ -93,8 -93,6 +93,6 @@@
        s8 sectors_per_block_bits;
        void (*alloc_callback)(struct dm_buffer *);
        void (*write_callback)(struct dm_buffer *);
-       bool no_sleep;
        struct kmem_cache *slab_buffer;
        struct kmem_cache *slab_cache;
        struct dm_io_client *dm_io;
@@@ -174,7 -172,7 +172,7 @@@ static DEFINE_STATIC_KEY_FALSE(no_sleep
  static void dm_bufio_lock(struct dm_bufio_client *c)
  {
        if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-               spin_lock_irqsave_nested(&c->spinlock, c->spinlock_flags, dm_bufio_in_request());
+               spin_lock_bh(&c->spinlock);
        else
                mutex_lock_nested(&c->lock, dm_bufio_in_request());
  }
  static int dm_bufio_trylock(struct dm_bufio_client *c)
  {
        if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-               return spin_trylock_irqsave(&c->spinlock, c->spinlock_flags);
+               return spin_trylock_bh(&c->spinlock);
        else
                return mutex_trylock(&c->lock);
  }
  static void dm_bufio_unlock(struct dm_bufio_client *c)
  {
        if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
-               spin_unlock_irqrestore(&c->spinlock, c->spinlock_flags);
+               spin_unlock_bh(&c->spinlock);
        else
                mutex_unlock(&c->lock);
  }
@@@ -592,12 -590,13 +590,12 @@@ static void dmio_complete(unsigned lon
        b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
  }
  
 -static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 +static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
                     unsigned n_sectors, unsigned offset)
  {
        int r;
        struct dm_io_request io_req = {
 -              .bi_op = rw,
 -              .bi_op_flags = 0,
 +              .bi_opf = op,
                .notify.fn = dmio_complete,
                .notify.context = b,
                .client = b->c->dm_io,
@@@ -630,7 -629,7 +628,7 @@@ static void bio_complete(struct bio *bi
        b->end_io(b, status);
  }
  
 -static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
 +static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
                    unsigned n_sectors, unsigned offset)
  {
        struct bio *bio;
        bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
        if (!bio) {
  dmio:
 -              use_dmio(b, rw, sector, n_sectors, offset);
 +              use_dmio(b, op, sector, n_sectors, offset);
                return;
        }
 -      bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
 +      bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = bio_complete;
        bio->bi_private = b;
@@@ -683,8 -682,7 +681,8 @@@ static inline sector_t block_to_sector(
        return sector;
  }
  
 -static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
 +static void submit_io(struct dm_buffer *b, enum req_op op,
 +                    void (*end_io)(struct dm_buffer *, blk_status_t))
  {
        unsigned n_sectors;
        sector_t sector;
  
        sector = block_to_sector(b->c, b->block);
  
 -      if (rw != REQ_OP_WRITE) {
 +      if (op != REQ_OP_WRITE) {
                n_sectors = b->c->block_size >> SECTOR_SHIFT;
                offset = 0;
        } else {
        }
  
        if (b->data_mode != DATA_MODE_VMALLOC)
 -              use_bio(b, rw, sector, n_sectors, offset);
 +              use_bio(b, op, sector, n_sectors, offset);
        else
 -              use_dmio(b, rw, sector, n_sectors, offset);
 +              use_dmio(b, op, sector, n_sectors, offset);
  }
  
  /*----------------------------------------------------------------
@@@ -817,6 -815,10 +815,10 @@@ static struct dm_buffer *__get_unclaime
                BUG_ON(test_bit(B_WRITING, &b->state));
                BUG_ON(test_bit(B_DIRTY, &b->state));
  
+               if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
+                   unlikely(test_bit(B_READING, &b->state)))
+                       continue;
                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                cond_resched();
        }
  
+       if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+               return NULL;
        list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));
  
@@@ -1356,7 -1361,8 +1361,7 @@@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_
  int dm_bufio_issue_flush(struct dm_bufio_client *c)
  {
        struct dm_io_request io_req = {
 -              .bi_op = REQ_OP_WRITE,
 -              .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
 +              .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
@@@ -1379,7 -1385,8 +1384,7 @@@ EXPORT_SYMBOL_GPL(dm_bufio_issue_flush)
  int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
  {
        struct dm_io_request io_req = {
 -              .bi_op = REQ_OP_DISCARD,
 -              .bi_op_flags = REQ_SYNC,
 +              .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
@@@ -1632,7 -1639,8 +1637,8 @@@ static void drop_buffers(struct dm_bufi
   */
  static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
  {
-       if (!(gfp & __GFP_FS)) {
+       if (!(gfp & __GFP_FS) ||
+           (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
                if (test_bit(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
@@@ -1826,8 -1834,7 +1832,8 @@@ struct dm_bufio_client *dm_bufio_client
        c->shrinker.scan_objects = dm_bufio_shrink_scan;
        c->shrinker.seeks = 1;
        c->shrinker.batch = 0;
 -      r = register_shrinker(&c->shrinker);
 +      r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
 +                            MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
        if (r)
                goto bad;
  
index 2347e83902f1a9ad5a5a114d765eb3b9110d5de0,0d70c9c60d463908df6c1ed03ba24a32f63a7aa8..94b6cb599db4f1a1a12eb3f79739ec1a178c1b8d
@@@ -19,7 -19,6 +19,7 @@@
  #include <linux/module.h>
  #include <linux/reboot.h>
  #include <linux/scatterlist.h>
 +#include <linux/string.h>
  #include <linux/jump_label.h>
  
  #define DM_MSG_PREFIX                 "verity"
@@@ -38,7 -37,7 +38,7 @@@
  #define DM_VERITY_OPT_AT_MOST_ONCE    "check_at_most_once"
  #define DM_VERITY_OPT_TASKLET_VERIFY  "try_verify_in_tasklet"
  
- #define DM_VERITY_OPTS_MAX            (3 + DM_VERITY_OPTS_FEC + \
+ #define DM_VERITY_OPTS_MAX            (4 + DM_VERITY_OPTS_FEC + \
                                         DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
  
  static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
@@@ -1053,7 -1052,7 +1053,7 @@@ static int verity_parse_opt_args(struc
                                 struct dm_verity_sig_opts *verify_args,
                                 bool only_modifier_opts)
  {
-       int r;
+       int r = 0;
        unsigned argc;
        struct dm_target *ti = v->ti;
        const char *arg_name;
                        if (r)
                                return r;
                        continue;
+               } else if (only_modifier_opts) {
+                       /*
+                        * Ignore unrecognized opt, could easily be an extra
+                        * argument to an option whose parsing was skipped.
+                        * Normal parsing (@only_modifier_opts=false) will
+                        * properly parse all options (and their extra args).
+                        */
+                       continue;
                }
  
+               DMERR("Unrecognized verity feature request: %s", arg_name);
                ti->error = "Unrecognized verity feature request";
                return -EINVAL;
        } while (argc && !r);
        return r;
  }
  
 +/*
 + * Check whether a DM target is a verity target.
 + */
 +bool dm_is_verity_target(struct dm_target *ti)
 +{
 +      return ti->type->module == THIS_MODULE;
 +}
 +
 +/*
 + * Get the root digest of a verity target.
 + *
 + * Returns a copy of the root digest, the caller is responsible for
 + * freeing the memory of the digest.
 + */
 +int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size)
 +{
 +      struct dm_verity *v = ti->private;
 +
 +      if (!dm_is_verity_target(ti))
 +              return -EINVAL;
 +
 +      *root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL);
 +      if (*root_digest == NULL)
 +              return -ENOMEM;
 +
 +      *digest_size = v->digest_size;
 +
 +      return 0;
 +}
 +
  static struct target_type verity_target = {
        .name           = "verity",
        .features       = DM_TARGET_IMMUTABLE,
index 1fc161d65673145befcfec71dbbe58e36e586e72,03fe2c5d5e32c08e9b0d6d9c8734d7542fc67848..96a003eb732341812322332c673cd4986ffbf97e
@@@ -523,7 -523,8 +523,7 @@@ static void ssd_commit_flushed(struct d
  
                region.sector += wc->start_sector;
                atomic_inc(&endio.count);
 -              req.bi_op = REQ_OP_WRITE;
 -              req.bi_op_flags = REQ_SYNC;
 +              req.bi_opf = REQ_OP_WRITE | REQ_SYNC;
                req.mem.type = DM_IO_VMA;
                req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
                req.client = wc->dm_io;
@@@ -561,7 -562,8 +561,7 @@@ static void ssd_commit_superblock(struc
  
        region.sector += wc->start_sector;
  
 -      req.bi_op = REQ_OP_WRITE;
 -      req.bi_op_flags = REQ_SYNC | REQ_FUA;
 +      req.bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
        req.mem.type = DM_IO_VMA;
        req.mem.ptr.vma = (char *)wc->memory_map;
        req.client = wc->dm_io;
@@@ -590,7 -592,8 +590,7 @@@ static void writecache_disk_flush(struc
        region.bdev = dev->bdev;
        region.sector = 0;
        region.count = 0;
 -      req.bi_op = REQ_OP_WRITE;
 -      req.bi_op_flags = REQ_PREFLUSH;
 +      req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        req.mem.type = DM_IO_KMEM;
        req.mem.ptr.addr = NULL;
        req.client = wc->dm_io;
@@@ -978,7 -981,8 +978,7 @@@ static int writecache_read_metadata(str
        region.bdev = wc->ssd_dev->bdev;
        region.sector = wc->start_sector;
        region.count = n_sectors;
 -      req.bi_op = REQ_OP_READ;
 -      req.bi_op_flags = REQ_SYNC;
 +      req.bi_opf = REQ_OP_READ | REQ_SYNC;
        req.mem.type = DM_IO_VMA;
        req.mem.ptr.vma = (char *)wc->memory_map;
        req.client = wc->dm_io;
@@@ -1594,7 -1598,8 +1594,8 @@@ done
  
        default:
                BUG();
-               return -1;
+               wc_unlock(wc);
+               return DM_MAPIO_KILL;
        }
  }
  
This page took 0.072801 seconds and 4 git commands to generate.