Git Repo - linux.git/commitdiff
Merge tag 'for-6.10/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <[email protected]>
Wed, 15 May 2024 01:34:19 +0000 (18:34 -0700)
committer Linus Torvalds <[email protected]>
Wed, 15 May 2024 01:34:19 +0000 (18:34 -0700)
Pull device mapper updates from Mike Snitzer:

 - Add a dm-crypt optional "high_priority" flag that enables the crypt
   workqueues to use WQ_HIGHPRI.

 - Export dm-crypt workqueues via sysfs (by enabling WQ_SYSFS) to allow
   for improved visibility and controls over IO and crypt workqueues.

 - Fix dm-crypt to no longer constrain max_segment_size to PAGE_SIZE.
   This limit isn't needed given that the block core provides late bio
   splitting if bio exceeds underlying limits (e.g. max_segment_size).

 - Fix dm-crypt crypt_queue's use of WQ_UNBOUND to not use
   WQ_CPU_INTENSIVE because it is meaningless with WQ_UNBOUND.

 - Fix various issues with dm-delay target (ranging from a resource
   teardown fix, to a fix for a hung task when using kthread mode, and
   other improvements that followed from code inspection).

* tag 'for-6.10/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm-delay: remove timer_lock
  dm-delay: change locking to avoid contention
  dm-delay: fix max_delay calculations
  dm-delay: fix hung task introduced by kthread mode
  dm-delay: fix workqueue delay_timer race
  dm-crypt: don't set WQ_CPU_INTENSIVE for WQ_UNBOUND crypt_queue
  dm: use queue_limits_set
  dm-crypt: stop constraining max_segment_size to PAGE_SIZE
  dm-crypt: export sysfs of all workqueues
  dm-crypt: add the optional "high_priority" flag

1  2 
drivers/md/dm-table.c

diff --combined drivers/md/dm-table.c
index 2c6fbd87363f296f9c991cca8340f97cb99d43b5,88114719fe187ad42905424b2f5e685bf3e21e17..cc66a27c363a65f2e034706ff0073b327d71f14b
@@@ -1963,26 -1963,27 +1963,27 @@@ int dm_table_set_restrictions(struct dm
        bool wc = false, fua = false;
        int r;
  
-       /*
-        * Copy table's limits to the DM device's request_queue
-        */
-       q->limits = *limits;
        if (dm_table_supports_nowait(t))
                blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
  
        if (!dm_table_supports_discards(t)) {
-               q->limits.max_discard_sectors = 0;
-               q->limits.max_hw_discard_sectors = 0;
-               q->limits.discard_granularity = 0;
-               q->limits.discard_alignment = 0;
-               q->limits.discard_misaligned = 0;
+               limits->max_hw_discard_sectors = 0;
+               limits->discard_granularity = 0;
+               limits->discard_alignment = 0;
+               limits->discard_misaligned = 0;
        }
  
+       if (!dm_table_supports_write_zeroes(t))
+               limits->max_write_zeroes_sectors = 0;
        if (!dm_table_supports_secure_erase(t))
-               q->limits.max_secure_erase_sectors = 0;
+               limits->max_secure_erase_sectors = 0;
+       r = queue_limits_set(q, limits);
+       if (r)
+               return r;
  
        if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
                wc = true;
        else
                blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
  
-       if (!dm_table_supports_write_zeroes(t))
-               q->limits.max_write_zeroes_sectors = 0;
        dm_table_verify_integrity(t);
  
        /*
                r = dm_set_zones_restrictions(t, q);
                if (r)
                        return r;
 -              if (!static_key_enabled(&zoned_enabled.key))
 +              if (blk_queue_is_zoned(q) &&
 +                  !static_key_enabled(&zoned_enabled.key))
                        static_branch_enable(&zoned_enabled);
        }
  
        dm_update_crypto_profile(q, t);
-       disk_update_readahead(t->md->disk);
  
        /*
         * Check for request-based device is left to
This page took 0.063368 seconds and 4 git commands to generate.