Merge tag 'for-5.9/block-merge-20200804' of git://git.kernel.dk/linux-block
author Linus Torvalds <[email protected]>
Wed, 5 Aug 2020 18:12:34 +0000 (11:12 -0700)
committer Linus Torvalds <[email protected]>
Wed, 5 Aug 2020 18:12:34 +0000 (11:12 -0700)
Pull block stacking updates from Jens Axboe:
 "The stacking related fixes depended on both the core block and drivers
  branches, so here's a topic branch with that change.

  Outside of that, a late fix from Johannes for zone revalidation"

* tag 'for-5.9/block-merge-20200804' of git://git.kernel.dk/linux-block:
  block: don't do revalidate zones on invalid devices
  block: remove blk_queue_stack_limits
  block: remove bdev_stack_limits
  block: inherit the zoned characteristics in blk_stack_limits

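Two of the commits above remove helpers that only wrapped blk_stack_limits(); the drbd and nvme hunks below convert their callers to stack the queue_limits directly. A minimal sketch of the converted call pattern, using an illustrative function name (assuming a top queue t stacked on a bottom queue b with no partition offset between them):

	/* was: blk_queue_stack_limits(t, b); */
	static void stack_queue_limits_example(struct request_queue *t,
					       struct request_queue *b)
	{
		blk_stack_limits(&t->limits, &b->limits, 0);
	}
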
drivers/block/drbd/drbd_nl.c
drivers/md/dm-table.c
drivers/nvme/host/core.c
include/linux/blkdev.h

diff --combined drivers/block/drbd/drbd_nl.c
index c0017cc51ecc74ff2463af252f7e6fa446ae8ce1,d0d9a549b58388bd94ececd75ae7508bdbb4848d..28eb078f8b754d669e39129e8d7bb8e4c55f5b48
@@@ -1250,7 -1250,7 +1250,7 @@@ static void fixup_discard_if_not_suppor
  
  static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
  {
-       /* Fixup max_write_zeroes_sectors after blk_queue_stack_limits():
+       /* Fixup max_write_zeroes_sectors after blk_stack_limits():
         * if we can handle "zeroes" efficiently on the protocol,
         * we want to do that, even if our backend does not announce
         * max_write_zeroes_sectors itself. */
@@@ -1361,7 -1361,7 +1361,7 @@@ static void drbd_setup_queue_param(stru
        decide_on_write_same_support(device, q, b, o, disable_write_same);
  
        if (b) {
-               blk_queue_stack_limits(q, b);
+               blk_stack_limits(&q->limits, &b->limits, 0);
  
                if (q->backing_dev_info->ra_pages !=
                    b->backing_dev_info->ra_pages) {
@@@ -3423,7 -3423,7 +3423,7 @@@ int drbd_adm_dump_devices(struct sk_buf
  {
        struct nlattr *resource_filter;
        struct drbd_resource *resource;
 -      struct drbd_device *uninitialized_var(device);
 +      struct drbd_device *device;
        int minor, err, retcode;
        struct drbd_genlmsghdr *dh;
        struct device_info device_info;
@@@ -3512,7 -3512,7 +3512,7 @@@ int drbd_adm_dump_connections(struct sk
  {
        struct nlattr *resource_filter;
        struct drbd_resource *resource = NULL, *next_resource;
 -      struct drbd_connection *uninitialized_var(connection);
 +      struct drbd_connection *connection;
        int err = 0, retcode;
        struct drbd_genlmsghdr *dh;
        struct connection_info connection_info;
@@@ -3674,7 -3674,7 +3674,7 @@@ int drbd_adm_dump_peer_devices(struct s
  {
        struct nlattr *resource_filter;
        struct drbd_resource *resource;
 -      struct drbd_device *uninitialized_var(device);
 +      struct drbd_device *device;
        struct drbd_peer_device *peer_device = NULL;
        int minor, err, retcode;
        struct drbd_genlmsghdr *dh;
diff --combined drivers/md/dm-table.c
index 5c4cb0dcff1ee6c6c59cef43c3442bd0446343ab,aac4c31cfc8498b5b4a137f58bd72818a1c28cfa..5edc3079e7c1990409a57551a8116d80919e3fc2
@@@ -458,7 -458,8 +458,8 @@@ static int dm_set_device_limits(struct 
                return 0;
        }
  
-       if (bdev_stack_limits(limits, bdev, start) < 0)
+       if (blk_stack_limits(limits, &q->limits,
+                       get_start_sect(bdev) + start) < 0)
  		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
  		       "physical_block_size=%u, logical_block_size=%u, "
  		       "alignment_offset=%u, start=%llu",
  		       dm_device_name(ti->table->md), bdevname(bdev, b),
  		       q->limits.physical_block_size,
  		       q->limits.logical_block_size,
  		       q->limits.alignment_offset,
  		       (unsigned long long) start << SECTOR_SHIFT);
-       limits->zoned = blk_queue_zoned_model(q);
        return 0;
  }
  
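The removed bdev_stack_limits() folded the partition start sector into the offset before delegating to blk_stack_limits(), which is why the replacement above open-codes get_start_sect(bdev) + start. A hedged sketch of the equivalent logic, under an illustrative name rather than the removed helper's verbatim body:

	static int stack_bdev_limits_example(struct queue_limits *limits,
					     struct block_device *bdev,
					     sector_t start)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		/* account for where the partition begins on the whole device */
		return blk_stack_limits(limits, &q->limits,
					get_start_sect(bdev) + start);
	}
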
@@@ -639,7 -637,7 +637,7 @@@ static int validate_hardware_logical_bl
         */
        unsigned short remaining = 0;
  
 -      struct dm_target *uninitialized_var(ti);
 +      struct dm_target *ti;
        struct queue_limits ti_limits;
        unsigned i;
  
@@@ -1528,22 -1526,6 +1526,6 @@@ combine_limits
                               dm_device_name(table->md),
                               (unsigned long long) ti->begin,
                               (unsigned long long) ti->len);
-               /*
-                * FIXME: this should likely be moved to blk_stack_limits(), would
-                * also eliminate limits->zoned stacking hack in dm_set_device_limits()
-                */
-               if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
-                       /*
-                        * By default, the stacked limits zoned model is set to
-                        * BLK_ZONED_NONE in blk_set_stacking_limits(). Update
-                        * this model using the first target model reported
-                        * that is not BLK_ZONED_NONE. This will be either the
-                        * first target device zoned model or the model reported
-                        * by the target .io_hints.
-                        */
-                       limits->zoned = ti_limits.zoned;
-               }
        }
  
        /*
diff --combined drivers/nvme/host/core.c
index 767d62985bba28cc0870a6e28cb5ece24b72b96e,aa2b66edba5e019acf2efbc52e92d989b87ea4d5..88cff309d8e4f06a18661f8c10f8ff318a835834
@@@ -366,16 -366,6 +366,16 @@@ bool nvme_change_ctrl_state(struct nvme
                        break;
                }
                break;
 +      case NVME_CTRL_DELETING_NOIO:
 +              switch (old_state) {
 +              case NVME_CTRL_DELETING:
 +              case NVME_CTRL_DEAD:
 +                      changed = true;
 +                      /* FALLTHRU */
 +              default:
 +                      break;
 +              }
 +              break;
        case NVME_CTRL_DEAD:
                switch (old_state) {
                case NVME_CTRL_DELETING:
@@@ -413,7 -403,6 +413,7 @@@ static bool nvme_state_terminal(struct 
        case NVME_CTRL_CONNECTING:
                return false;
        case NVME_CTRL_DELETING:
 +      case NVME_CTRL_DELETING_NOIO:
        case NVME_CTRL_DEAD:
                return true;
        default:
@@@ -465,11 -454,10 +465,11 @@@ static void nvme_free_ns(struct kref *k
        kfree(ns);
  }
  
 -static void nvme_put_ns(struct nvme_ns *ns)
 +void nvme_put_ns(struct nvme_ns *ns)
  {
        kref_put(&ns->kref, nvme_free_ns);
  }
 +EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
  
  static inline void nvme_clear_nvme_request(struct request *req)
  {
@@@ -605,14 -593,6 +605,14 @@@ static void nvme_assign_write_stream(st
                req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
  }
  
 +static void nvme_setup_passthrough(struct request *req,
 +              struct nvme_command *cmd)
 +{
 +      memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
 +      /* passthru commands should let the driver set the SGL flags */
 +      cmd->common.flags &= ~NVME_CMD_SGL_ALL;
 +}
 +
  static inline void nvme_setup_flush(struct nvme_ns *ns,
                struct nvme_command *cmnd)
  {
@@@ -778,7 -758,7 +778,7 @@@ blk_status_t nvme_setup_cmd(struct nvme
        switch (req_op(req)) {
        case REQ_OP_DRV_IN:
        case REQ_OP_DRV_OUT:
 -              memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
 +              nvme_setup_passthrough(req, cmd);
                break;
        case REQ_OP_FLUSH:
                nvme_setup_flush(ns, cmd);
@@@ -929,120 -909,6 +929,120 @@@ out
        return ERR_PTR(ret);
  }
  
 +static u32 nvme_known_admin_effects(u8 opcode)
 +{
 +      switch (opcode) {
 +      case nvme_admin_format_nvm:
 +              return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
 +                      NVME_CMD_EFFECTS_CSE_MASK;
 +      case nvme_admin_sanitize_nvm:
 +              return NVME_CMD_EFFECTS_CSE_MASK;
 +      default:
 +              break;
 +      }
 +      return 0;
 +}
 +
 +u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 +{
 +      u32 effects = 0;
 +
 +      if (ns) {
 +              if (ns->head->effects)
 +                      effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
 +              if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
 +                      dev_warn(ctrl->device,
 +                               "IO command:%02x has unhandled effects:%08x\n",
 +                               opcode, effects);
 +              return 0;
 +      }
 +
 +      if (ctrl->effects)
 +              effects = le32_to_cpu(ctrl->effects->acs[opcode]);
 +      effects |= nvme_known_admin_effects(opcode);
 +
 +      return effects;
 +}
 +EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
 +
 +static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 +                             u8 opcode)
 +{
 +      u32 effects = nvme_command_effects(ctrl, ns, opcode);
 +
 +      /*
 +       * For simplicity, IO to all namespaces is quiesced even if the command
 +       * effects say only one namespace is affected.
 +       */
 +      if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 +              mutex_lock(&ctrl->scan_lock);
 +              mutex_lock(&ctrl->subsys->lock);
 +              nvme_mpath_start_freeze(ctrl->subsys);
 +              nvme_mpath_wait_freeze(ctrl->subsys);
 +              nvme_start_freeze(ctrl);
 +              nvme_wait_freeze(ctrl);
 +      }
 +      return effects;
 +}
 +
 +static void nvme_update_formats(struct nvme_ctrl *ctrl, u32 *effects)
 +{
 +      struct nvme_ns *ns;
 +
 +      down_read(&ctrl->namespaces_rwsem);
 +      list_for_each_entry(ns, &ctrl->namespaces, list)
 +              if (_nvme_revalidate_disk(ns->disk))
 +                      nvme_set_queue_dying(ns);
 +              else if (blk_queue_is_zoned(ns->disk->queue)) {
 +                      /*
 +                       * IO commands are required to fully revalidate a zoned
 +                       * device. Force the command effects to trigger rescan
 +                       * work so report zones can run in a context with
 +                       * unfrozen IO queues.
 +                       */
 +                      *effects |= NVME_CMD_EFFECTS_NCC;
 +              }
 +      up_read(&ctrl->namespaces_rwsem);
 +}
 +
 +static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
 +{
 +      /*
 +       * Revalidate LBA changes prior to unfreezing. This is necessary to
 +       * prevent memory corruption if a logical block size was changed by
 +       * this command.
 +       */
 +      if (effects & NVME_CMD_EFFECTS_LBCC)
 +              nvme_update_formats(ctrl, &effects);
 +      if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 +              nvme_unfreeze(ctrl);
 +              nvme_mpath_unfreeze(ctrl->subsys);
 +              mutex_unlock(&ctrl->subsys->lock);
 +              nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 +              mutex_unlock(&ctrl->scan_lock);
 +      }
 +      if (effects & NVME_CMD_EFFECTS_CCC)
 +              nvme_init_identify(ctrl);
 +      if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
 +              nvme_queue_scan(ctrl);
 +              flush_work(&ctrl->scan_work);
 +      }
 +}
 +
 +void nvme_execute_passthru_rq(struct request *rq)
 +{
 +      struct nvme_command *cmd = nvme_req(rq)->cmd;
 +      struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
 +      struct nvme_ns *ns = rq->q->queuedata;
 +      struct gendisk *disk = ns ? ns->disk : NULL;
 +      u32 effects;
 +
 +      effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
 +      blk_execute_rq(rq->q, disk, rq, 0);
 +      nvme_passthru_end(ctrl, effects);
 +}
 +EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
 +
  static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                }
        }
  
 -      blk_execute_rq(req->q, disk, req, 0);
 +      nvme_execute_passthru_rq(req);
        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else
@@@ -1274,9 -1140,6 +1274,9 @@@ static int nvme_identify_ns_descs(struc
        int status, pos, len;
        void *data;
  
 +      if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
 +              return 0;
 +
        c.identify.opcode = nvme_admin_identify;
        c.identify.nsid = cpu_to_le32(nsid);
        c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
        if (status) {
                dev_warn(ctrl->device,
                        "Identify Descriptors failed (%d)\n", status);
 -               /*
 -                * Don't treat non-retryable errors as fatal, as we potentially
 -                * already have a NGUID or EUI-64.  If we failed with DNR set,
 -                * we want to silently ignore the error as we can still
 -                * identify the device, but if the status has DNR set, we want
 -                * to propagate the error back specifically for the disk
 -                * revalidation flow to make sure we don't abandon the
 -                * device just because of a temporal retry-able error (such
 -                * as path of transport errors).
 -                */
 -              if (status > 0 && (status & NVME_SC_DNR) && !nvme_multi_css(ctrl))
 -                      status = 0;
                goto free_data;
        }
  
@@@ -1500,12 -1375,105 +1500,12 @@@ static int nvme_submit_io(struct nvme_n
                        metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
  }
  
 -static u32 nvme_known_admin_effects(u8 opcode)
 -{
 -      switch (opcode) {
 -      case nvme_admin_format_nvm:
 -              return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
 -                                      NVME_CMD_EFFECTS_CSE_MASK;
 -      case nvme_admin_sanitize_nvm:
 -              return NVME_CMD_EFFECTS_CSE_MASK;
 -      default:
 -              break;
 -      }
 -      return 0;
 -}
 -
 -static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 -                                                              u8 opcode)
 -{
 -      u32 effects = 0;
 -
 -      if (ns) {
 -              if (ns->head->effects)
 -                      effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
 -              if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
 -                      dev_warn(ctrl->device,
 -                               "IO command:%02x has unhandled effects:%08x\n",
 -                               opcode, effects);
 -              return 0;
 -      }
 -
 -      if (ctrl->effects)
 -              effects = le32_to_cpu(ctrl->effects->acs[opcode]);
 -      effects |= nvme_known_admin_effects(opcode);
 -
 -      /*
 -       * For simplicity, IO to all namespaces is quiesced even if the command
 -       * effects say only one namespace is affected.
 -       */
 -      if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 -              mutex_lock(&ctrl->scan_lock);
 -              mutex_lock(&ctrl->subsys->lock);
 -              nvme_mpath_start_freeze(ctrl->subsys);
 -              nvme_mpath_wait_freeze(ctrl->subsys);
 -              nvme_start_freeze(ctrl);
 -              nvme_wait_freeze(ctrl);
 -      }
 -      return effects;
 -}
 -
 -static void nvme_update_formats(struct nvme_ctrl *ctrl, u32 *effects)
 -{
 -      struct nvme_ns *ns;
 -
 -      down_read(&ctrl->namespaces_rwsem);
 -      list_for_each_entry(ns, &ctrl->namespaces, list)
 -              if (_nvme_revalidate_disk(ns->disk))
 -                      nvme_set_queue_dying(ns);
 -              else if (blk_queue_is_zoned(ns->disk->queue)) {
 -                      /*
 -                       * IO commands are required to fully revalidate a zoned
 -                       * device. Force the command effects to trigger rescan
 -                       * work so report zones can run in a context with
 -                       * unfrozen IO queues.
 -                       */
 -                      *effects |= NVME_CMD_EFFECTS_NCC;
 -              }
 -      up_read(&ctrl->namespaces_rwsem);
 -}
 -
 -static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
 -{
 -      /*
 -       * Revalidate LBA changes prior to unfreezing. This is necessary to
 -       * prevent memory corruption if a logical block size was changed by
 -       * this command.
 -       */
 -      if (effects & NVME_CMD_EFFECTS_LBCC)
 -              nvme_update_formats(ctrl, &effects);
 -      if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
 -              nvme_unfreeze(ctrl);
 -              nvme_mpath_unfreeze(ctrl->subsys);
 -              mutex_unlock(&ctrl->subsys->lock);
 -              nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 -              mutex_unlock(&ctrl->scan_lock);
 -      }
 -      if (effects & NVME_CMD_EFFECTS_CCC)
 -              nvme_init_identify(ctrl);
 -      if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
 -              nvme_queue_scan(ctrl);
 -              flush_work(&ctrl->scan_work);
 -      }
 -}
 -
  static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                        struct nvme_passthru_cmd __user *ucmd)
  {
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
        unsigned timeout = 0;
 -      u32 effects;
        u64 result;
        int status;
  
        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);
  
 -      effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        nvme_to_user_ptr(cmd.addr), cmd.data_len,
                        nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
                        0, &result, timeout);
 -      nvme_passthru_end(ctrl, effects);
  
        if (status >= 0) {
                if (put_user(result, &ucmd->result))
@@@ -1551,6 -1521,7 +1551,6 @@@ static int nvme_user_cmd64(struct nvme_
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
        unsigned timeout = 0;
 -      u32 effects;
        int status;
  
        if (!capable(CAP_SYS_ADMIN))
        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);
  
 -      effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        nvme_to_user_ptr(cmd.addr), cmd.data_len,
                        nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
                        0, &cmd.result, timeout);
 -      nvme_passthru_end(ctrl, effects);
  
        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
@@@ -2081,7 -2054,8 +2081,8 @@@ static int __nvme_revalidate_disk(struc
  #ifdef CONFIG_NVME_MULTIPATH
        if (ns->head->disk) {
                nvme_update_disk_info(ns->head->disk, ns, id);
-               blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+               blk_stack_limits(&ns->head->disk->queue->limits,
+                                &ns->queue->limits, 0);
                nvme_mpath_update_disk_size(ns->head->disk);
        }
  #endif
@@@ -2373,7 -2347,12 +2374,7 @@@ EXPORT_SYMBOL_GPL(nvme_disable_ctrl)
  
  int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
  {
 -      /*
 -       * Default to a 4K page size, with the intention to update this
 -       * path in the future to accomodate architectures with differing
 -       * kernel and IO page sizes.
 -       */
 -      unsigned dev_page_min, page_shift = 12;
 +      unsigned dev_page_min;
        int ret;
  
        ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
        }
        dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
  
 -      if (page_shift < dev_page_min) {
 +      if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
                dev_err(ctrl->device,
                        "Minimum device page size %u too large for host (%u)\n",
 -                      1 << dev_page_min, 1 << page_shift);
 +                      1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
                return -ENODEV;
        }
  
 -      ctrl->page_size = 1 << page_shift;
 -
        if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
                ctrl->ctrl_config = NVME_CC_CSS_CSI;
        else
                ctrl->ctrl_config = NVME_CC_CSS_NVM;
 -      ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
 +      ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
        ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
        ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
        ctrl->ctrl_config |= NVME_CC_ENABLE;
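 
This hunk drops the per-controller page_size in favour of a fixed NVME_CTRL_PAGE_SHIFT. CC.MPS encodes the controller memory page size as 2^(12 + MPS), so with the constant assumed to be 12 (4KiB pages) the programmed value stays MPS = 0 and only the variable goes away:

	/* assuming NVME_CTRL_PAGE_SHIFT == 12, i.e. 4KiB controller pages */
	mps = NVME_CTRL_PAGE_SHIFT - 12;	/* 0 -> 2^(12 + 0) = 4096 bytes */
	ctrl->ctrl_config |= mps << NVME_CC_MPS_SHIFT;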
@@@ -2444,13 -2425,13 +2445,13 @@@ static void nvme_set_queue_limits(struc
  
        if (ctrl->max_hw_sectors) {
                u32 max_segments =
 -                      (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 +                      (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
  
                max_segments = min_not_zero(max_segments, ctrl->max_segments);
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
 -      blk_queue_virt_boundary(q, ctrl->page_size - 1);
 +      blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
        blk_queue_dma_alignment(q, 7);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                vwc = true;
@@@ -3504,7 -3485,6 +3505,7 @@@ static ssize_t nvme_sysfs_show_state(st
                [NVME_CTRL_RESETTING]   = "resetting",
                [NVME_CTRL_CONNECTING]  = "connecting",
                [NVME_CTRL_DELETING]    = "deleting",
 +              [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
                [NVME_CTRL_DEAD]        = "dead",
        };
  
@@@ -3608,8 -3588,8 +3609,8 @@@ static ssize_t nvme_ctrl_reconnect_dela
        int err;
  
        err = kstrtou32(buf, 10, &v);
 -      if (err || v > UINT_MAX)
 -              return -EINVAL;
 +      if (err)
 +              return err;
  
        ctrl->opts->reconnect_delay = v;
        return count;
@@@ -3820,7 -3800,7 +3821,7 @@@ static int ns_cmp(void *priv, struct li
        return nsa->head->ns_id - nsb->head->ns_id;
  }
  
 -static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 +struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
  {
        struct nvme_ns *ns, *ret = NULL;
  
        up_read(&ctrl->namespaces_rwsem);
        return ret;
  }
 +EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
  
  static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
  {
@@@ -4142,9 -4121,6 +4143,9 @@@ void nvme_remove_namespaces(struct nvme
        if (ctrl->state == NVME_CTRL_DEAD)
                nvme_kill_queues(ctrl);
  
 +      /* this is a no-op when called from the controller reset handler */
 +      nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
 +
        down_write(&ctrl->namespaces_rwsem);
        list_splice_init(&ctrl->namespaces, &ns_list);
        up_write(&ctrl->namespaces_rwsem);
@@@ -4339,7 -4315,8 +4340,7 @@@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl)
  
  void nvme_start_ctrl(struct nvme_ctrl *ctrl)
  {
 -      if (ctrl->kato)
 -              nvme_start_keep_alive(ctrl);
 +      nvme_start_keep_alive(ctrl);
  
        nvme_enable_aen(ctrl);
  
@@@ -4584,29 -4561,6 +4585,29 @@@ void nvme_sync_queues(struct nvme_ctrl 
  }
  EXPORT_SYMBOL_GPL(nvme_sync_queues);
  
 +struct nvme_ctrl *nvme_ctrl_get_by_path(const char *path)
 +{
 +      struct nvme_ctrl *ctrl;
 +      struct file *f;
 +
 +      f = filp_open(path, O_RDWR, 0);
 +      if (IS_ERR(f))
 +              return ERR_CAST(f);
 +
 +      if (f->f_op != &nvme_dev_fops) {
 +              ctrl = ERR_PTR(-EINVAL);
 +              goto out_close;
 +      }
 +
 +      ctrl = f->private_data;
 +      nvme_get_ctrl(ctrl);
 +
 +out_close:
 +      filp_close(f, NULL);
 +      return ctrl;
 +}
 +EXPORT_SYMBOL_NS_GPL(nvme_ctrl_get_by_path, NVME_TARGET_PASSTHRU);
 +
  /*
   * Check we didn't inadvertently grow the command structure sizes:
   */
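The new symbols are exported with EXPORT_SYMBOL_NS_GPL() into the NVME_TARGET_PASSTHRU namespace, so only modules that explicitly import that namespace can link against them. A hedged sketch of a consumer module (illustrative code, not the nvmet passthru backend added elsewhere in this series):

	#include <linux/module.h>
	#include <linux/err.h>
	#include "nvme.h"	/* driver-private header declaring these exports */

	MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

	static struct nvme_ctrl *example_ctrl;

	/* Bind to a controller character device, e.g. "/dev/nvme0". */
	static int example_bind_ctrl(const char *devpath)
	{
		example_ctrl = nvme_ctrl_get_by_path(devpath);
		if (IS_ERR(example_ctrl))
			return PTR_ERR(example_ctrl);
		return 0;
	}

	MODULE_LICENSE("GPL");
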
diff --combined include/linux/blkdev.h
index 9273d1126ed2e1e07c5285d7e48a0728efd5a9a2,bbdd3cf620381a80633b3368cf55f708c6e44434..bb5636cc17b91a75c83e6f87b391abc074d0bb70
@@@ -306,11 -306,14 +306,14 @@@ enum blk_queue_state 
  
  /*
   * Zoned block device models (zoned limit).
+  *
+  * Note: This needs to be ordered from the least to the most severe
+  * restrictions for the inheritance in blk_stack_limits() to work.
   */
  enum blk_zoned_model {
-       BLK_ZONED_NONE, /* Regular block device */
-       BLK_ZONED_HA,   /* Host-aware zoned block device */
-       BLK_ZONED_HM,   /* Host-managed zoned block device */
+       BLK_ZONED_NONE = 0,     /* Regular block device */
+       BLK_ZONED_HA,           /* Host-aware zoned block device */
+       BLK_ZONED_HM,           /* Host-managed zoned block device */
  };
  
  struct queue_limits {
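 
Ordering the enum from least to most restrictive lets blk_stack_limits() inherit the stricter zoned model of the two queues with a plain comparison, which is what allows the dm-table.c hunk above to drop its limits->zoned stacking hack. A sketch of the inheritance step (the exact statement in block/blk-settings.c may differ):

	/* inside blk_stack_limits(): keep the more restrictive zoned model */
	t->zoned = max(t->zoned, b->zoned);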
@@@ -1136,11 -1139,8 +1139,8 @@@ extern void blk_set_default_limits(stru
  extern void blk_set_stacking_limits(struct queue_limits *lim);
  extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                            sector_t offset);
- extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
-                           sector_t offset);
  extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                              sector_t offset);
- extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
  extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
  extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
  extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
@@@ -1212,7 -1212,6 +1212,7 @@@ struct blk_plug 
        struct list_head cb_list; /* md requires an unplug callback */
        unsigned short rq_count;
        bool multiple_queues;
 +      bool nowait;
  };
  #define BLK_MAX_REQUEST_COUNT 16
  #define BLK_PLUG_FLUSH_SIZE (128 * 1024)