Git Repo - linux.git/commitdiff
Merge branch '6.5/scsi-staging' into 6.5/scsi-fixes
author		Martin K. Petersen <[email protected]>
		Tue, 11 Jul 2023 16:15:15 +0000 (12:15 -0400)
committer	Martin K. Petersen <[email protected]>
		Tue, 11 Jul 2023 16:15:15 +0000 (12:15 -0400)
Pull in the currently staged SCSI fixes for 6.5.

Signed-off-by: Martin K. Petersen <[email protected]>
block/blk-zoned.c
drivers/block/virtio_blk.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/ufs/core/ufshcd.c

diff --combined block/blk-zoned.c
index 0f9f97cdddd99c797906c58a232a9f137f5ce138,da92ce0c5da98304b4cb6b75cdfa93177ea38745..619ee41a51cc8c81b0333f4f95a70e3b3ed7c442
@@@ -57,10 -57,16 +57,10 @@@ EXPORT_SYMBOL_GPL(blk_zone_cond_str)
   */
  bool blk_req_needs_zone_write_lock(struct request *rq)
  {
 -      if (blk_rq_is_passthrough(rq))
 -              return false;
 -
        if (!rq->q->disk->seq_zones_wlock)
                return false;
  
 -      if (bdev_op_is_zoned_write(rq->q->disk->part0, req_op(rq)))
 -              return blk_rq_zone_is_seq(rq);
 -
 -      return false;
 +      return blk_rq_is_seq_zoned_write(rq);
  }
  EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
  
@@@ -323,8 -329,8 +323,8 @@@ static int blkdev_copy_zone_to_user(str
   * BLKREPORTZONE ioctl processing.
   * Called from blkdev_ioctl.
   */
 -int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 -                            unsigned int cmd, unsigned long arg)
 +int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
 +              unsigned long arg)
  {
        void __user *argp = (void __user *)arg;
        struct zone_report_args args;
        return 0;
  }
  
 -static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
 -                                    const struct blk_zone_range *zrange)
 +static int blkdev_truncate_zone_range(struct block_device *bdev,
 +              blk_mode_t mode, const struct blk_zone_range *zrange)
  {
        loff_t start, end;
  
   * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
   * Called from blkdev_ioctl.
   */
 -int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 +int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
                           unsigned int cmd, unsigned long arg)
  {
        void __user *argp = (void __user *)arg;
        if (!bdev_is_zoned(bdev))
                return -ENOTTY;
  
 -      if (!(mode & FMODE_WRITE))
 +      if (!(mode & BLK_OPEN_WRITE))
                return -EBADF;
  
        if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
@@@ -442,7 -448,6 +442,6 @@@ struct blk_revalidate_zone_args 
        unsigned long   *conv_zones_bitmap;
        unsigned long   *seq_zones_wlock;
        unsigned int    nr_zones;
-       sector_t        zone_sectors;
        sector_t        sector;
  };
  
@@@ -456,38 -461,34 +455,34 @@@ static int blk_revalidate_zone_cb(struc
        struct gendisk *disk = args->disk;
        struct request_queue *q = disk->queue;
        sector_t capacity = get_capacity(disk);
+       sector_t zone_sectors = q->limits.chunk_sectors;
+       /* Check for bad zones and holes in the zone report */
+       if (zone->start != args->sector) {
+               pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+                       disk->disk_name, args->sector, zone->start);
+               return -ENODEV;
+       }
+       if (zone->start >= capacity || !zone->len) {
+               pr_warn("%s: Invalid zone start %llu, length %llu\n",
+                       disk->disk_name, zone->start, zone->len);
+               return -ENODEV;
+       }
  
        /*
         * All zones must have the same size, with the exception on an eventual
         * smaller last zone.
         */
-       if (zone->start == 0) {
-               if (zone->len == 0 || !is_power_of_2(zone->len)) {
-                       pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
-                               disk->disk_name, zone->len);
-                       return -ENODEV;
-               }
-               args->zone_sectors = zone->len;
-               args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
-       } else if (zone->start + args->zone_sectors < capacity) {
-               if (zone->len != args->zone_sectors) {
+       if (zone->start + zone->len < capacity) {
+               if (zone->len != zone_sectors) {
                        pr_warn("%s: Invalid zoned device with non constant zone size\n",
                                disk->disk_name);
                        return -ENODEV;
                }
-       } else {
-               if (zone->len > args->zone_sectors) {
-                       pr_warn("%s: Invalid zoned device with larger last zone size\n",
-                               disk->disk_name);
-                       return -ENODEV;
-               }
-       }
-       /* Check for holes in the zone report */
-       if (zone->start != args->sector) {
-               pr_warn("%s: Zone gap at sectors %llu..%llu\n",
-                       disk->disk_name, args->sector, zone->start);
+       } else if (zone->len > zone_sectors) {
+               pr_warn("%s: Invalid zoned device with larger last zone size\n",
+                       disk->disk_name);
                return -ENODEV;
        }
  
   * @disk:     Target disk
   * @update_driver_data:       Callback to update driver data on the frozen disk
   *
-  * Helper function for low-level device drivers to (re) allocate and initialize
-  * a disk request queue zone bitmaps. This functions should normally be called
-  * within the disk ->revalidate method for blk-mq based drivers.  For BIO based
-  * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
-  * is correct.
+  * Helper function for low-level device drivers to check and (re) allocate and
+  * initialize a disk request queue zone bitmaps. This functions should normally
+  * be called within the disk ->revalidate method for blk-mq based drivers.
+  * Before calling this function, the device driver must already have set the
+  * device zone size (chunk_sector limit) and the max zone append limit.
+  * For BIO based drivers, this function cannot be used. BIO based device drivers
+  * only need to set disk->nr_zones so that the sysfs exposed value is correct.
   * If the @update_driver_data callback function is not NULL, the callback is
   * executed with the device request queue frozen after all zones have been
   * checked.
@@@ -539,9 -542,9 +536,9 @@@ int blk_revalidate_disk_zones(struct ge
                              void (*update_driver_data)(struct gendisk *disk))
  {
        struct request_queue *q = disk->queue;
-       struct blk_revalidate_zone_args args = {
-               .disk           = disk,
-       };
+       sector_t zone_sectors = q->limits.chunk_sectors;
+       sector_t capacity = get_capacity(disk);
+       struct blk_revalidate_zone_args args = { };
        unsigned int noio_flag;
        int ret;
  
        if (WARN_ON_ONCE(!queue_is_mq(q)))
                return -EIO;
  
-       if (!get_capacity(disk))
-               return -EIO;
+       if (!capacity)
+               return -ENODEV;
+       /*
+        * Checks that the device driver indicated a valid zone size and that
+        * the max zone append limit is set.
+        */
+       if (!zone_sectors || !is_power_of_2(zone_sectors)) {
+               pr_warn("%s: Invalid non power of two zone size (%llu)\n",
+                       disk->disk_name, zone_sectors);
+               return -ENODEV;
+       }
+       if (!q->limits.max_zone_append_sectors) {
+               pr_warn("%s: Invalid 0 maximum zone append limit\n",
+                       disk->disk_name);
+               return -ENODEV;
+       }
  
        /*
         * Ensure that all memory allocations in this context are done as if
         * GFP_NOIO was specified.
         */
+       args.disk = disk;
+       args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors);
        noio_flag = memalloc_noio_save();
        ret = disk->fops->report_zones(disk, 0, UINT_MAX,
                                       blk_revalidate_zone_cb, &args);
         * If zones where reported, make sure that the entire disk capacity
         * has been checked.
         */
-       if (ret > 0 && args.sector != get_capacity(disk)) {
+       if (ret > 0 && args.sector != capacity) {
                pr_warn("%s: Missing zones from sector %llu\n",
                        disk->disk_name, args.sector);
                ret = -ENODEV;
         */
        blk_mq_freeze_queue(q);
        if (ret > 0) {
-               blk_queue_chunk_sectors(q, args.zone_sectors);
                disk->nr_zones = args.nr_zones;
                swap(disk->seq_zones_wlock, args.seq_zones_wlock);
                swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
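
A minimal sketch of the call order the updated blk_revalidate_disk_zones() kernel-doc above now requires from blk-mq zoned drivers: set the zone size (chunk_sectors limit) and a non-zero max zone append limit before revalidating zones. The helper name and parameters below are illustrative assumptions, not part of this merge; blk_queue_chunk_sectors(), blk_queue_max_zone_append_sectors() and blk_revalidate_disk_zones() are the in-kernel APIs also used by the virtio_blk change that follows.

#include <linux/blkdev.h>

/* Hypothetical blk-mq zoned driver helper (illustrative only). */
static int my_zoned_revalidate(struct gendisk *disk, struct request_queue *q,
			       unsigned int zone_sectors,
			       unsigned int max_append_sectors)
{
	/* Zone size must be a power-of-two number of 512-byte sectors. */
	blk_queue_chunk_sectors(q, zone_sectors);

	/* A zero max zone append limit is now rejected with -ENODEV. */
	blk_queue_max_zone_append_sectors(q, max_append_sectors);

	/* Only after both limits are set is zone revalidation valid. */
	return blk_revalidate_disk_zones(disk, NULL);
}
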
diff --combined drivers/block/virtio_blk.c
index b47358da92a231ee15bf5452c236df93452fe15d,d6df29566370a32ab21f914c11a88b364633998c..1fe011676d070ed275490e0f265ca958f8b080bf
@@@ -348,33 -348,63 +348,33 @@@ static inline void virtblk_request_done
        blk_mq_end_request(req, status);
  }
  
 -static void virtblk_complete_batch(struct io_comp_batch *iob)
 -{
 -      struct request *req;
 -
 -      rq_list_for_each(&iob->req_list, req) {
 -              virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
 -              virtblk_cleanup_cmd(req);
 -      }
 -      blk_mq_end_request_batch(iob);
 -}
 -
 -static int virtblk_handle_req(struct virtio_blk_vq *vq,
 -                            struct io_comp_batch *iob)
 -{
 -      struct virtblk_req *vbr;
 -      int req_done = 0;
 -      unsigned int len;
 -
 -      while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
 -              struct request *req = blk_mq_rq_from_pdu(vbr);
 -
 -              if (likely(!blk_should_fake_timeout(req->q)) &&
 -                  !blk_mq_complete_request_remote(req) &&
 -                  !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
 -                                       virtblk_complete_batch))
 -                      virtblk_request_done(req);
 -              req_done++;
 -      }
 -
 -      return req_done;
 -}
 -
  static void virtblk_done(struct virtqueue *vq)
  {
        struct virtio_blk *vblk = vq->vdev->priv;
 -      struct virtio_blk_vq *vblk_vq = &vblk->vqs[vq->index];
 -      int req_done = 0;
 +      bool req_done = false;
 +      int qid = vq->index;
 +      struct virtblk_req *vbr;
        unsigned long flags;
 -      DEFINE_IO_COMP_BATCH(iob);
 +      unsigned int len;
  
 -      spin_lock_irqsave(&vblk_vq->lock, flags);
 +      spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
 -              req_done += virtblk_handle_req(vblk_vq, &iob);
 +              while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
 +                      struct request *req = blk_mq_rq_from_pdu(vbr);
  
 +                      if (likely(!blk_should_fake_timeout(req->q)))
 +                              blk_mq_complete_request(req);
 +                      req_done = true;
 +              }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
  
 -      if (req_done) {
 -              if (!rq_list_empty(iob.req_list))
 -                      iob.complete(&iob);
 -
 -              /* In case queue is stopped waiting for more buffers. */
 +      /* In case queue is stopped waiting for more buffers. */
 +      if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
 -      }
 -      spin_unlock_irqrestore(&vblk_vq->lock, flags);
 +      spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
  }
  
  static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
@@@ -751,7 -781,6 +751,6 @@@ static int virtblk_probe_zoned_device(s
  {
        u32 v, wg;
        u8 model;
-       int ret;
  
        virtio_cread(vdev, struct virtio_blk_config,
                     zoned.model, &model);
                        vblk->zone_sectors);
                return -ENODEV;
        }
+       blk_queue_chunk_sectors(q, vblk->zone_sectors);
        dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
  
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                blk_queue_max_discard_sectors(q, 0);
        }
  
-       ret = blk_revalidate_disk_zones(vblk->disk, NULL);
-       if (!ret) {
-               virtio_cread(vdev, struct virtio_blk_config,
-                            zoned.max_append_sectors, &v);
-               if (!v) {
-                       dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
-                       return -ENODEV;
-               }
-               if ((v << SECTOR_SHIFT) < wg) {
-                       dev_err(&vdev->dev,
-                               "write granularity %u exceeds max_append_sectors %u limit\n",
-                               wg, v);
-                       return -ENODEV;
-               }
-               blk_queue_max_zone_append_sectors(q, v);
-               dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
+       virtio_cread(vdev, struct virtio_blk_config,
+                    zoned.max_append_sectors, &v);
+       if (!v) {
+               dev_warn(&vdev->dev, "zero max_append_sectors reported\n");
+               return -ENODEV;
        }
+       if ((v << SECTOR_SHIFT) < wg) {
+               dev_err(&vdev->dev,
+                       "write granularity %u exceeds max_append_sectors %u limit\n",
+                       wg, v);
+               return -ENODEV;
+       }
+       blk_queue_max_zone_append_sectors(q, v);
+       dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
  
-       return ret;
+       return blk_revalidate_disk_zones(vblk->disk, NULL);
  }
  
  #else
@@@ -1253,37 -1279,15 +1249,37 @@@ static void virtblk_map_queues(struct b
        }
  }
  
 +static void virtblk_complete_batch(struct io_comp_batch *iob)
 +{
 +      struct request *req;
 +
 +      rq_list_for_each(&iob->req_list, req) {
 +              virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
 +              virtblk_cleanup_cmd(req);
 +      }
 +      blk_mq_end_request_batch(iob);
 +}
 +
  static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
  {
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
 +      struct virtblk_req *vbr;
        unsigned long flags;
 +      unsigned int len;
        int found = 0;
  
        spin_lock_irqsave(&vq->lock, flags);
 -      found = virtblk_handle_req(vq, iob);
 +
 +      while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
 +              struct request *req = blk_mq_rq_from_pdu(vbr);
 +
 +              found++;
 +              if (!blk_mq_complete_request_remote(req) &&
 +                  !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
 +                                              virtblk_complete_batch))
 +                      virtblk_request_done(req);
 +      }
  
        if (found)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
diff --combined drivers/scsi/aacraid/aacraid.h
index 7c6efde75da664b3819161d5aa96466b0d26aaa1,7d5a155073c6271866e0cf3ce9fff1e3e30c1669..73b6ac0c01f549a1ab20fcee5172b42a0232a35a
@@@ -1678,7 -1678,6 +1678,7 @@@ struct aac_de
        u32                     handle_pci_error;
        bool                    init_reset;
        u8                      soft_reset_support;
 +      u8                      use_map_queue;
  };
  
  #define aac_adapter_interrupt(dev) \
@@@ -2618,7 -2617,7 +2618,7 @@@ struct aac_hba_info 
  struct aac_aifcmd {
        __le32 command;         /* Tell host what type of notify this is */
        __le32 seqnum;          /* To allow ordering of reports (if necessary) */
-       u8 data[1];             /* Undefined length (from kernel viewpoint) */
+       u8 data[];              /* Undefined length (from kernel viewpoint) */
  };
  
  /**
diff --combined drivers/scsi/qla2xxx/qla_def.h
index d44c4d37b50b45748db1ff09fa08dfab6bdeaadb,892ceba51c230fa80472be7d4567b90f2aca7fcc..4ae38305c15a115730dfdf4615a21aa2c11f49f9
@@@ -3812,7 -3812,6 +3812,7 @@@ struct qla_qpair 
        uint64_t retry_term_jiff;
        struct qla_tgt_counters tgt_counters;
        uint16_t cpuid;
 +      bool cpu_mapped;
        struct qla_fw_resources fwres ____cacheline_aligned;
        struct  qla_buf_pool buf_pool;
        u32     cmd_cnt;
@@@ -4462,7 -4461,6 +4462,6 @@@ struct qla_hw_data 
  
        /* n2n */
        struct fc_els_flogi plogi_els_payld;
- #define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi) - 4)
  
        void            *swl;
  
diff --combined drivers/scsi/qla2xxx/qla_init.c
index c3dd8dd4f73409ed7d46c7d340dfefdd67e8d311,e2d51f68f747de97d5b408b65b97d3dd05b959f2..367fba27fe699310f0b17f1b51a8e87a36522aac
@@@ -8434,7 -8434,7 +8434,7 @@@ qla24xx_load_risc_flash(scsi_qla_host_
                ql_dbg(ql_dbg_init, vha, 0x0163,
                    "-> fwdt%u template allocate template %#x words...\n",
                    j, risc_size);
-               fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+               fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
                if (!fwdt->template) {
                        ql_log(ql_log_warn, vha, 0x0164,
                            "-> fwdt%u failed allocate template.\n", j);
@@@ -8689,7 -8689,7 +8689,7 @@@ qla24xx_load_risc_blob(scsi_qla_host_t 
                ql_dbg(ql_dbg_init, vha, 0x0173,
                    "-> fwdt%u template allocate template %#x words...\n",
                    j, risc_size);
-               fwdt->template = vmalloc(risc_size * sizeof(*dcode));
+               fwdt->template = vmalloc_array(risc_size, sizeof(*dcode));
                if (!fwdt->template) {
                        ql_log(ql_log_warn, vha, 0x0174,
                            "-> fwdt%u failed allocate template.\n", j);
@@@ -9641,9 -9641,6 +9641,9 @@@ struct qla_qpair *qla2xxx_create_qpair(
                qpair->rsp->req = qpair->req;
                qpair->rsp->qpair = qpair;
  
 +              if (!qpair->cpu_mapped)
 +                      qla_cpu_update(qpair, raw_smp_processor_id());
 +
                if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                        if (ha->fw_attributes & BIT_4)
                                qpair->difdix_supported = 1;
diff --combined drivers/ufs/core/ufshcd.c
index 983fae84d9e80995468c7ff2fd1b8e92bd1537e6,e2812911e462f9de9d017505b0896d5aef2b81af..1294467757964e52127bde42f53774487f1dd4c2
@@@ -2815,10 -2815,10 +2815,10 @@@ static void ufshcd_map_queues(struct Sc
  static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
  {
        struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
 -              i * sizeof_utp_transfer_cmd_desc(hba);
 +              i * ufshcd_get_ucd_size(hba);
        struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
        dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
 -              i * sizeof_utp_transfer_cmd_desc(hba);
 +              i * ufshcd_get_ucd_size(hba);
        u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
                                       response_upiu);
        u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@@ -3756,7 -3756,7 +3756,7 @@@ static int ufshcd_memory_alloc(struct u
        size_t utmrdl_size, utrdl_size, ucdl_size;
  
        /* Allocate memory for UTP command descriptors */
 -      ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
 +      ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
        hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
                                                  ucdl_size,
                                                  &hba->ucdl_dma_addr,
@@@ -3856,7 -3856,7 +3856,7 @@@ static void ufshcd_host_memory_configur
        prdt_offset =
                offsetof(struct utp_transfer_cmd_desc, prd_table);
  
 -      cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
 +      cmd_desc_size = ufshcd_get_ucd_size(hba);
        cmd_desc_dma_addr = hba->ucdl_dma_addr;
  
        for (i = 0; i < hba->nutrs; i++) {
        return ret;
  }
  
+ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
+ {
+       int err;
+       struct ufs_query_req *request = NULL;
+       struct ufs_query_res *response = NULL;
+       struct ufs_dev_info *dev_info = &hba->dev_info;
+       struct utp_upiu_query_v4_0 *upiu_data;
+       if (dev_info->wspecversion < 0x400)
+               return;
+       ufshcd_hold(hba);
+       mutex_lock(&hba->dev_cmd.lock);
+       ufshcd_init_query(hba, &request, &response,
+                         UPIU_QUERY_OPCODE_WRITE_ATTR,
+                         QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
+       request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+       upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
+       put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
+       err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+       if (err)
+               dev_err(hba->dev, "%s: failed to set timestamp %d\n",
+                       __func__, err);
+       mutex_unlock(&hba->dev_cmd.lock);
+       ufshcd_release(hba);
+ }
  /**
   * ufshcd_add_lus - probe and add UFS logical units
   * @hba: per-adapter instance
@@@ -8562,7 -8597,7 +8597,7 @@@ static void ufshcd_release_sdb_queue(st
  {
        size_t ucdl_size, utrdl_size;
  
 -      ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
 +      ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
        dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
                           hba->ucdl_dma_addr);
  
@@@ -8708,6 -8743,8 +8743,8 @@@ static int ufshcd_device_init(struct uf
        ufshcd_set_ufs_dev_active(hba);
        ufshcd_force_reset_auto_bkops(hba);
  
+       ufshcd_set_timestamp_attr(hba);
        /* Gear up to HS gear if supported */
        if (hba->max_pwr_info.is_valid) {
                /*
@@@ -9574,16 -9611,8 +9611,16 @@@ static int __ufshcd_wl_suspend(struct u
                         * that performance might be impacted.
                         */
                        ret = ufshcd_urgent_bkops(hba);
 -                      if (ret)
 +                      if (ret) {
 +                              /*
 +                               * If return err in suspend flow, IO will hang.
 +                               * Trigger error handler and break suspend for
 +                               * error recovery.
 +                               */
 +                              ufshcd_force_error_recovery(hba);
 +                              ret = -EBUSY;
                                goto enable_scaling;
 +                      }
                } else {
                        /* make sure that auto bkops is disabled */
                        ufshcd_disable_auto_bkops(hba);
@@@ -9749,6 -9778,7 +9786,7 @@@ static int __ufshcd_wl_resume(struct uf
                ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
                if (ret)
                        goto set_old_link_state;
+               ufshcd_set_timestamp_attr(hba);
        }
  
        if (ufshcd_keep_autobkops_enabled_except_suspend(hba))