Git Repo - J-linux.git/commitdiff
Merge branch 'fixes' into misc
author: James Bottomley <[email protected]>
Tue, 27 Mar 2018 16:27:34 +0000 (12:27 -0400)
committer: James Bottomley <[email protected]>
Wed, 4 Apr 2018 00:38:39 +0000 (17:38 -0700)
Somewhat nasty merge due to conflicts between "33b28357dd00 scsi:
qla2xxx: Fix Async GPN_FT for FCP and FC-NVMe scan" and "2b5b96473efc
scsi: qla2xxx: Fix FC-NVMe LUN discovery"

Merge is non-trivial and has been verified by Qlogic (Cavium)

Signed-off-by: James E.J. Bottomley <[email protected]>
31 files changed:
1  2 
drivers/scsi/Makefile
drivers/scsi/aacraid/linit.c
drivers/scsi/csiostor/csio_lnode.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sd_zbc.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufshcd.c
include/scsi/scsi_cmnd.h
include/scsi/scsi_host.h

diff --combined drivers/scsi/Makefile
index d5135efbf9cd18007db68948cf19bf4b0e52ba5d,de1b3fce936d5d7d3ee70c2dabc3957e911b438f..e29f9b8fd66db1b21167fd7d15eaf0723b59b826
@@@ -74,9 -74,12 +74,9 @@@ obj-$(CONFIG_SCSI_AIC94XX)   += aic94xx
  obj-$(CONFIG_SCSI_PM8001)     += pm8001/
  obj-$(CONFIG_SCSI_ISCI)               += isci/
  obj-$(CONFIG_SCSI_IPS)                += ips.o
 -obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
  obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
 -obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
  obj-$(CONFIG_SCSI_NCR_D700)   += 53c700.o NCR_D700.o
  obj-$(CONFIG_SCSI_NCR_Q720)   += NCR_Q720_mod.o
 -obj-$(CONFIG_SCSI_SYM53C416)  += sym53c416.o
  obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o       qlogicfas.o
  obj-$(CONFIG_PCMCIA_QLOGIC)   += qlogicfas408.o
  obj-$(CONFIG_SCSI_QLOGIC_1280)        += qla1280.o 
@@@ -90,6 -93,8 +90,6 @@@ obj-$(CONFIG_SCSI_HPSA)               += hpsa.
  obj-$(CONFIG_SCSI_SMARTPQI)   += smartpqi/
  obj-$(CONFIG_SCSI_SYM53C8XX_2)        += sym53c8xx_2/
  obj-$(CONFIG_SCSI_ZALON)      += zalon7xx.o
 -obj-$(CONFIG_SCSI_EATA_PIO)   += eata_pio.o
 -obj-$(CONFIG_SCSI_EATA)               += eata.o
  obj-$(CONFIG_SCSI_DC395x)     += dc395x.o
  obj-$(CONFIG_SCSI_AM53C974)   += esp_scsi.o   am53c974.o
  obj-$(CONFIG_CXLFLASH)                += cxlflash/
@@@ -180,7 -185,6 +180,6 @@@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) 
  CFLAGS_ncr53c8xx.o    := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
  zalon7xx-objs := zalon.o ncr53c8xx.o
  NCR_Q720_mod-objs     := NCR_Q720.o ncr53c8xx.o
- oktagon_esp_mod-objs  := oktagon_esp.o oktagon_io.o
  
  # Files generated that shall be removed upon make clean
  clean-files :=        53c700_d.h 53c700_u.h
index b3b931ab77ebdfc2190b113489bb1bcf81edd623,b730e8edb8b3272c43ca8deab990c3e5986333ca..2664ea0df35fa1384db54c3e8f0ff270cba09148
@@@ -1037,7 -1037,7 +1037,7 @@@ static int aac_eh_bus_reset(struct scsi
                        info = &aac->hba_map[bus][cid];
                        if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
                            info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
 -                              fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
 +                              fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
                                cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
                        }
                }
@@@ -1677,9 -1677,6 +1677,9 @@@ static int aac_probe_one(struct pci_de
        aac->cardtype = index;
        INIT_LIST_HEAD(&aac->entry);
  
 +      if (aac_reset_devices || reset_devices)
 +              aac->init_reset = true;
 +
        aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
        if (!aac->fibs)
                goto out_free_host;
         *      Map in the registers from the adapter.
         */
        aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
-       if ((*aac_drivers[index].init)(aac))
+       if ((*aac_drivers[index].init)(aac)) {
+               error = -ENODEV;
                goto out_unmap;
+       }
  
        if (aac->sync_mode) {
                if (aac_sync_mode)
index 1c53179523b849b6d40f0d53e65c9b57217d8865,7dbbbb81a1e7de64fc3f8ad3079b4a5879153f39..cc5611efc7a9a62dbb1762ce950e4688188bbeca
@@@ -114,7 -114,7 +114,7 @@@ static enum csio_ln_ev fwevt_to_lnevt[
  static struct csio_lnode *
  csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
  {
-       struct csio_lnode *ln = hw->rln;
+       struct csio_lnode *ln;
        struct list_head *tmp;
  
        /* Match siblings lnode with portid */
@@@ -352,14 -352,6 +352,14 @@@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *
                val = htonl(FC_PORTSPEED_1GBIT);
        else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
                val = htonl(FC_PORTSPEED_10GBIT);
 +      else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G)
 +              val = htonl(FC_PORTSPEED_25GBIT);
 +      else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G)
 +              val = htonl(FC_PORTSPEED_40GBIT);
 +      else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G)
 +              val = htonl(FC_PORTSPEED_50GBIT);
 +      else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G)
 +              val = htonl(FC_PORTSPEED_100GBIT);
        else
                val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
        csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
index acd7fb356f01ece9e060737f46d137647000f323,4b44325d1a82868b94909240347c3d6258c5a794..12dc7100bb4c240e1161e60043413d64611a3bd3
@@@ -138,12 -138,12 +138,12 @@@ static void release_port_group(struct k
  static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
                       int bufflen, struct scsi_sense_hdr *sshdr, int flags)
  {
 -      u8 cdb[COMMAND_SIZE(MAINTENANCE_IN)];
 +      u8 cdb[MAX_COMMAND_SIZE];
        int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                REQ_FAILFAST_DRIVER;
  
        /* Prepare the command. */
 -      memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_IN));
 +      memset(cdb, 0x0, MAX_COMMAND_SIZE);
        cdb[0] = MAINTENANCE_IN;
        if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
                cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
  static int submit_stpg(struct scsi_device *sdev, int group_id,
                       struct scsi_sense_hdr *sshdr)
  {
 -      u8 cdb[COMMAND_SIZE(MAINTENANCE_OUT)];
 +      u8 cdb[MAX_COMMAND_SIZE];
        unsigned char stpg_data[8];
        int stpg_len = 8;
        int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
        put_unaligned_be16(group_id, &stpg_data[6]);
  
        /* Prepare the command. */
 -      memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_OUT));
 +      memset(cdb, 0x0, MAX_COMMAND_SIZE);
        cdb[0] = MAINTENANCE_OUT;
        cdb[1] = MO_SET_TARGET_PGS;
        put_unaligned_be32(stpg_len, &cdb[6]);
@@@ -214,8 -214,8 +214,8 @@@ static struct alua_port_group *alua_fin
  /*
   * alua_alloc_pg - Allocate a new port_group structure
   * @sdev: scsi device
 - * @h: alua device_handler data
   * @group_id: port group id
 + * @tpgs: target port group settings
   *
   * Allocate a new port_group structure for a given
   * device.
@@@ -876,6 -876,11 +876,11 @@@ static void alua_rtpg_work(struct work_
  
  /**
   * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+  * @pg: ALUA port group associated with @sdev.
+  * @sdev: SCSI device for which to submit an RTPG.
+  * @qdata: Information about the callback to invoke after the RTPG.
+  * @force: Whether or not to submit an RTPG if a work item that will submit an
+  *         RTPG already has been scheduled.
   *
   * Returns true if and only if alua_rtpg_work() will be called asynchronously.
   * That function is responsible for calling @qdata->fn().
diff --combined drivers/scsi/hosts.c
index ffd1030b6c91dfd485c5f21b2b8ab436632dd587,ef22b275d0505b5bd9580182b2c67730f3181dc8..7649d63a1b8dfa27016dbdb64a645c0eb6c34aee
  #include "scsi_logging.h"
  
  
 +static int shost_eh_deadline = -1;
 +
 +module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
 +MODULE_PARM_DESC(eh_deadline,
 +               "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");
 +
  static DEFINE_IDA(host_index_ida);
  
  
@@@ -154,6 -148,7 +154,6 @@@ int scsi_host_set_state(struct Scsi_Hos
                                             scsi_host_state_name(state)));
        return -EINVAL;
  }
 -EXPORT_SYMBOL(scsi_host_set_state);
  
  /**
   * scsi_remove_host - remove a scsi host
@@@ -333,8 -328,6 +333,6 @@@ static void scsi_host_dev_release(struc
        if (shost->work_q)
                destroy_workqueue(shost->work_q);
  
-       destroy_rcu_head(&shost->rcu);
        if (shost->shost_state == SHOST_CREATED) {
                /*
                 * Free the shost_dev device name here if scsi_host_alloc()
        kfree(shost);
  }
  
 -static int shost_eh_deadline = -1;
 -
 -module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
 -MODULE_PARM_DESC(eh_deadline,
 -               "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");
 -
  static struct device_type scsi_host_type = {
        .name =         "scsi_host",
        .release =      scsi_host_dev_release,
@@@ -403,7 -402,6 +401,6 @@@ struct Scsi_Host *scsi_host_alloc(struc
        INIT_LIST_HEAD(&shost->starved_list);
        init_waitqueue_head(&shost->host_wait);
        mutex_init(&shost->scan_mutex);
-       init_rcu_head(&shost->rcu);
  
        index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
        if (index < 0)
                shost->dma_boundary = 0xffffffff;
  
        shost->use_blk_mq = scsi_use_blk_mq;
+       shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
  
        device_initialize(&shost->shost_gendev);
        dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
  }
  EXPORT_SYMBOL(scsi_host_alloc);
  
 -struct Scsi_Host *scsi_register(struct scsi_host_template *sht, int privsize)
 -{
 -      struct Scsi_Host *shost = scsi_host_alloc(sht, privsize);
 -
 -      if (!sht->detect) {
 -              printk(KERN_WARNING "scsi_register() called on new-style "
 -                                  "template for driver %s\n", sht->name);
 -              dump_stack();
 -      }
 -
 -      if (shost)
 -              list_add_tail(&shost->sht_legacy_list, &sht->legacy_hosts);
 -      return shost;
 -}
 -EXPORT_SYMBOL(scsi_register);
 -
 -void scsi_unregister(struct Scsi_Host *shost)
 -{
 -      list_del(&shost->sht_legacy_list);
 -      scsi_host_put(shost);
 -}
 -EXPORT_SYMBOL(scsi_unregister);
 -
  static int __scsi_host_match(struct device *dev, const void *data)
  {
        struct Scsi_Host *p;
diff --combined drivers/scsi/hpsa.c
index 5293e6827ce557043943415523ff1cbb0c49d977,31423b6dc26dc09b79cc4e9296488dd94382d1f1..3a9eca163db8117e7bbf1132c7965d5253850668
@@@ -901,14 -901,14 +901,14 @@@ static ssize_t host_show_legacy_board(s
        return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
  }
  
 -static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 -static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 -static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
 +static DEVICE_ATTR_RO(raid_level);
 +static DEVICE_ATTR_RO(lunid);
 +static DEVICE_ATTR_RO(unique_id);
  static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
 -static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
 +static DEVICE_ATTR_RO(sas_address);
  static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
                        host_show_hp_ssd_smart_path_enabled, NULL);
 -static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
 +static DEVICE_ATTR_RO(path_info);
  static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
                host_show_hp_ssd_smart_path_status,
                host_store_hp_ssd_smart_path_status);
@@@ -1045,11 -1045,7 +1045,7 @@@ static void set_performant_mode(struct 
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (unlikely(!h->msix_vectors))
                        return;
-               if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
-                       c->Header.ReplyQueue =
-                               raw_smp_processor_id() % h->nreply_queues;
-               else
-                       c->Header.ReplyQueue = reply_queue % h->nreply_queues;
+               c->Header.ReplyQueue = reply_queue;
        }
  }
  
@@@ -1063,10 -1059,7 +1059,7 @@@ static void set_ioaccel1_performant_mod
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
-       if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
-               cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
-       else
-               cp->ReplyQueue = reply_queue % h->nreply_queues;
+       cp->ReplyQueue = reply_queue;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit (bit 0)
@@@ -1087,10 -1080,7 +1080,7 @@@ static void set_ioaccel2_tmf_performant
        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
-       if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
-               cp->reply_queue = smp_processor_id() % h->nreply_queues;
-       else
-               cp->reply_queue = reply_queue % h->nreply_queues;
+       cp->reply_queue = reply_queue;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
@@@ -1109,10 -1099,7 +1099,7 @@@ static void set_ioaccel2_performant_mod
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
-       if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
-               cp->reply_queue = smp_processor_id() % h->nreply_queues;
-       else
-               cp->reply_queue = reply_queue % h->nreply_queues;
+       cp->reply_queue = reply_queue;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
@@@ -1157,6 -1144,8 +1144,8 @@@ static void __enqueue_cmd_and_start_io(
  {
        dial_down_lockup_detection_during_fw_flash(h, c);
        atomic_inc(&h->commands_outstanding);
+       reply_queue = h->reply_map[raw_smp_processor_id()];
        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c, reply_queue);
@@@ -7376,6 -7365,26 +7365,26 @@@ static void hpsa_disable_interrupt_mode
        h->msix_vectors = 0;
  }
  
+ static void hpsa_setup_reply_map(struct ctlr_info *h)
+ {
+       const struct cpumask *mask;
+       unsigned int queue, cpu;
+       for (queue = 0; queue < h->msix_vectors; queue++) {
+               mask = pci_irq_get_affinity(h->pdev, queue);
+               if (!mask)
+                       goto fallback;
+               for_each_cpu(cpu, mask)
+                       h->reply_map[cpu] = queue;
+       }
+       return;
+ fallback:
+       for_each_possible_cpu(cpu)
+               h->reply_map[cpu] = 0;
+ }
  /* If MSI/MSI-X is supported by the kernel we will try to enable it on
   * controllers that are capable. If not, we use legacy INTx mode.
   */
@@@ -7771,6 -7780,10 +7780,10 @@@ static int hpsa_pci_init(struct ctlr_in
        err = hpsa_interrupt_mode(h);
        if (err)
                goto clean1;
+       /* setup mapping between CPU and reply queue */
+       hpsa_setup_reply_map(h);
        err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
        if (err)
                goto clean2;    /* intmode+region, pci */
@@@ -8480,6 -8493,28 +8493,28 @@@ static struct workqueue_struct *hpsa_cr
        return wq;
  }
  
+ static void hpda_free_ctlr_info(struct ctlr_info *h)
+ {
+       kfree(h->reply_map);
+       kfree(h);
+ }
+ static struct ctlr_info *hpda_alloc_ctlr_info(void)
+ {
+       struct ctlr_info *h;
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h)
+               return NULL;
+       h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL);
+       if (!h->reply_map) {
+               kfree(h);
+               return NULL;
+       }
+       return h;
+ }
  static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        int dac, rc;
@@@ -8517,7 -8552,7 +8552,7 @@@ reinit_after_soft_reset
         * the driver.  See comments in hpsa.h for more info.
         */
        BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
-       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       h = hpda_alloc_ctlr_info();
        if (!h) {
                dev_err(&pdev->dev, "Failed to allocate controller head\n");
                return -ENOMEM;
@@@ -8916,7 -8951,7 +8951,7 @@@ static void hpsa_remove_one(struct pci_
        h->lockup_detected = NULL;                      /* init_one 2 */
        /* (void) pci_disable_pcie_error_reporting(pdev); */    /* init_one 1 */
  
-       kfree(h);                                       /* init_one 1 */
+       hpda_free_ctlr_info(h);                         /* init_one 1 */
  }
  
  static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
index 6de9681ace82603370005590238e315b36df19b4,a372af68d9a94bf1b7bd9a97433a8597f00e252e..ceab5e5c41c277a25f879348885be5fa847c4cd8
@@@ -223,6 -223,7 +223,7 @@@ out_done
  static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
  {
        struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+       struct domain_device *dev = cmd_to_domain_dev(cmd);
        struct sas_task *task = TO_SAS_TASK(cmd);
  
        /* At this point, we only get called following an actual abort
         */
        sas_end_task(cmd, task);
  
+       if (dev_is_sata(dev)) {
+               /* defer commands to libata so that libata EH can
+                * handle ata qcs correctly
+                */
+               list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
+               return;
+       }
        /* now finish the command and move it on to the error
         * handler done list, this also takes it off the
         * error handler pending list.
        scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
  }
  
- static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
- {
-       struct domain_device *dev = cmd_to_domain_dev(cmd);
-       struct sas_ha_struct *ha = dev->port->ha;
-       struct sas_task *task = TO_SAS_TASK(cmd);
-       if (!dev_is_sata(dev)) {
-               sas_eh_finish_cmd(cmd);
-               return;
-       }
-       /* report the timeout to libata */
-       sas_end_task(cmd, task);
-       list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
- }
  static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
  {
        struct scsi_cmnd *cmd, *n;
        list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
                if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
                    cmd->device->lun == my_cmd->device->lun)
-                       sas_eh_defer_cmd(cmd);
+                       sas_eh_finish_cmd(cmd);
        }
  }
  
@@@ -487,28 -480,15 +480,28 @@@ static int sas_queue_reset(struct domai
  
  int sas_eh_abort_handler(struct scsi_cmnd *cmd)
  {
 -      int res;
 +      int res = TMF_RESP_FUNC_FAILED;
        struct sas_task *task = TO_SAS_TASK(cmd);
        struct Scsi_Host *host = cmd->device->host;
 +      struct domain_device *dev = cmd_to_domain_dev(cmd);
        struct sas_internal *i = to_sas_internal(host->transportt);
 +      unsigned long flags;
  
        if (!i->dft->lldd_abort_task)
                return FAILED;
  
 -      res = i->dft->lldd_abort_task(task);
 +      spin_lock_irqsave(host->host_lock, flags);
 +      /* We cannot do async aborts for SATA devices */
 +      if (dev_is_sata(dev) && !host->host_eh_scheduled) {
 +              spin_unlock_irqrestore(host->host_lock, flags);
 +              return FAILED;
 +      }
 +      spin_unlock_irqrestore(host->host_lock, flags);
 +
 +      if (task)
 +              res = i->dft->lldd_abort_task(task);
 +      else
 +              SAS_DPRINTK("no task to abort\n");
        if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
                return SUCCESS;
  
@@@ -631,12 -611,12 +624,12 @@@ static void sas_eh_handle_sas_errors(st
                case TASK_IS_DONE:
                        SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
                                    task);
-                       sas_eh_defer_cmd(cmd);
+                       sas_eh_finish_cmd(cmd);
                        continue;
                case TASK_IS_ABORTED:
                        SAS_DPRINTK("%s: task 0x%p is aborted\n",
                                    __func__, task);
-                       sas_eh_defer_cmd(cmd);
+                       sas_eh_finish_cmd(cmd);
                        continue;
                case TASK_IS_AT_LU:
                        SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
                                            "recovered\n",
                                            SAS_ADDR(task->dev),
                                            cmd->device->lun);
-                               sas_eh_defer_cmd(cmd);
+                               sas_eh_finish_cmd(cmd);
                                sas_scsi_clear_queue_lu(work_q, cmd);
                                goto Again;
                        }
index 905ea36da646e045ea241b2a264700850f9fff67,7b2bed5ab04c205cf3bc8d83bb50dd30b5c8fbf0..2ca9b25095da926c365e6bf8e7fb708ef06264ca
@@@ -4022,7 -4022,7 +4022,7 @@@ static int megasas_create_frame_pool(st
  
                cmd = instance->cmd_list[i];
  
 -              cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
 +              cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
                                            GFP_KERNEL, &cmd->frame_phys_addr);
  
                cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
                        return -ENOMEM;
                }
  
 -              memset(cmd->frame, 0, instance->mfi_frame_size);
                cmd->frame->io.context = cpu_to_le32(cmd->index);
                cmd->frame->io.pad_0 = 0;
                if ((instance->adapter_type == MFI_SERIES) && reset_devices)
@@@ -5164,6 -5165,26 +5164,26 @@@ skip_alloc
                instance->use_seqnum_jbod_fp = false;
  }
  
+ static void megasas_setup_reply_map(struct megasas_instance *instance)
+ {
+       const struct cpumask *mask;
+       unsigned int queue, cpu;
+       for (queue = 0; queue < instance->msix_vectors; queue++) {
+               mask = pci_irq_get_affinity(instance->pdev, queue);
+               if (!mask)
+                       goto fallback;
+               for_each_cpu(cpu, mask)
+                       instance->reply_map[cpu] = queue;
+       }
+       return;
+ fallback:
+       for_each_possible_cpu(cpu)
+               instance->reply_map[cpu] = cpu % instance->msix_vectors;
+ }
  /**
   * megasas_init_fw -  Initializes the FW
   * @instance:         Adapter soft state
@@@ -5342,6 -5363,8 +5362,8 @@@ static int megasas_init_fw(struct megas
                        goto fail_setup_irqs;
        }
  
+       megasas_setup_reply_map(instance);
        dev_info(&instance->pdev->dev,
                "firmware supports msix\t: (%d)", fw_msix_count);
        dev_info(&instance->pdev->dev,
@@@ -6122,20 -6145,29 +6144,29 @@@ static inline int megasas_alloc_mfi_ctr
   */
  static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
  {
+       instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
+                                     GFP_KERNEL);
+       if (!instance->reply_map)
+               return -ENOMEM;
        switch (instance->adapter_type) {
        case MFI_SERIES:
                if (megasas_alloc_mfi_ctrl_mem(instance))
-                       return -ENOMEM;
+                       goto fail;
                break;
        case VENTURA_SERIES:
        case THUNDERBOLT_SERIES:
        case INVADER_SERIES:
                if (megasas_alloc_fusion_context(instance))
-                       return -ENOMEM;
+                       goto fail;
                break;
        }
  
        return 0;
+  fail:
+       kfree(instance->reply_map);
+       instance->reply_map = NULL;
+       return -ENOMEM;
  }
  
  /*
   */
  static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
  {
+       kfree(instance->reply_map);
        if (instance->adapter_type == MFI_SERIES) {
                if (instance->producer)
                        pci_free_consistent(instance->pdev, sizeof(u32),
@@@ -6539,7 -6572,6 +6571,6 @@@ fail_io_attach
                pci_free_irq_vectors(instance->pdev);
  fail_init_mfi:
        scsi_host_put(host);
  fail_alloc_instance:
        pci_disable_device(pdev);
  
@@@ -6745,6 -6777,8 +6776,8 @@@ megasas_resume(struct pci_dev *pdev
        if (rval < 0)
                goto fail_reenable_msix;
  
+       megasas_setup_reply_map(instance);
        if (instance->adapter_type != MFI_SERIES) {
                megasas_reset_reply_desc(instance);
                if (megasas_ioc_init_fusion(instance)) {
@@@ -7032,15 -7066,15 +7065,15 @@@ static int megasas_mgmt_fasync(int fd, 
  /**
   * megasas_mgmt_poll -  char node "poll" entry point
   * */
 -static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
 +static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
  {
 -      unsigned int mask;
 +      __poll_t mask;
        unsigned long flags;
  
        poll_wait(file, &megasas_poll_wait, wait);
        spin_lock_irqsave(&poll_aen_lock, flags);
        if (megasas_poll_wait_aen)
 -              mask = (POLLIN | POLLRDNORM);
 +              mask = (EPOLLIN | EPOLLRDNORM);
        else
                mask = 0;
        megasas_poll_wait_aen = 0;
index 0a0e7aad0ca43c8e5de9ab71318931365df5aa95,0aafbfd1b7465c3a566c89392e73bfab1e037173..61f93a13495632b5e7b532af385f1c5a9576ebc7
@@@ -125,362 -125,6 +125,362 @@@ _scsih_set_fwfault_debug(const char *va
  module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
        param_get_int, &mpt3sas_fwfault_debug, 0644);
  
 +/**
 + * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 + *                              in BAR0 space.
 + *
 + * @ioc: per adapter object
 + * @reply: reply message frame(lower 32bit addr)
 + * @index: System request message index.
 + *
 + * @Returns - Nothing
 + */
 +static void
 +_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
 +              u32 index)
 +{
 +      /*
 +       * 256 is offset within sys register.
 +       * 256 offset MPI frame starts. Max MPI frame supported is 32.
 +       * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
 +       */
 +      u16 cmd_credit = ioc->facts.RequestCredit + 1;
 +      void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
 +                      MPI_FRAME_START_OFFSET +
 +                      (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
 +
 +      writel(reply, reply_free_iomem);
 +}
 +
 +/**
 + * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 + *                            to system/BAR0 region.
 + *
 + * @dst_iomem: Pointer to the destination location in BAR0 space.
 + * @src: Pointer to the Source data.
 + * @size: Size of data to be copied.
 + */
 +static void
 +_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
 +{
 +      int i;
 +      u32 *src_virt_mem = (u32 *)src;
 +
 +      for (i = 0; i < size/4; i++)
 +              writel((u32)src_virt_mem[i],
 +                              (void __iomem *)dst_iomem + (i * 4));
 +}
 +
 +/**
 + * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 + *
 + * @dst_iomem: Pointer to the destination location in BAR0 space.
 + * @src: Pointer to the Source data.
 + * @size: Size of data to be copied.
 + */
 +static void
 +_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
 +{
 +      int i;
 +      u32 *src_virt_mem = (u32 *)(src);
 +
 +      for (i = 0; i < size/4; i++)
 +              writel((u32)src_virt_mem[i],
 +                      (void __iomem *)dst_iomem + (i * 4));
 +}
 +
 +/**
 + * _base_get_chain - Calculates and Returns virtual chain address
 + *                     for the provided smid in BAR0 space.
 + *
 + * @ioc: per adapter object
 + * @smid: system request message index
 + * @sge_chain_count: Scatter gather chain count.
 + *
 + * @Return: chain address.
 + */
 +static inline void __iomem*
 +_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 +              u8 sge_chain_count)
 +{
 +      void __iomem *base_chain, *chain_virt;
 +      u16 cmd_credit = ioc->facts.RequestCredit + 1;
 +
 +      base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
 +              (cmd_credit * ioc->request_sz) +
 +              REPLY_FREE_POOL_SIZE;
 +      chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
 +                      ioc->request_sz) + (sge_chain_count * ioc->request_sz);
 +      return chain_virt;
 +}
 +
 +/**
 + * _base_get_chain_phys - Calculates and Returns physical address
 + *                    in BAR0 for scatter gather chains, for
 + *                    the provided smid.
 + *
 + * @ioc: per adapter object
 + * @smid: system request message index
 + * @sge_chain_count: Scatter gather chain count.
 + *
 + * @Return - Physical chain address.
 + */
 +static inline phys_addr_t
 +_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 +              u8 sge_chain_count)
 +{
 +      phys_addr_t base_chain_phys, chain_phys;
 +      u16 cmd_credit = ioc->facts.RequestCredit + 1;
 +
 +      base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
 +              (cmd_credit * ioc->request_sz) +
 +              REPLY_FREE_POOL_SIZE;
 +      chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
 +                      ioc->request_sz) + (sge_chain_count * ioc->request_sz);
 +      return chain_phys;
 +}
 +
 +/**
 + * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 + *                    buffer address for the provided smid.
 + *                    (Each smid can have 64K starts from 17024)
 + *
 + * @ioc: per adapter object
 + * @smid: system request message index
 + *
 + * @Returns - Pointer to buffer location in BAR0.
 + */
 +
 +static void __iomem *
 +_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 +{
 +      u16 cmd_credit = ioc->facts.RequestCredit + 1;
 +      // Added extra 1 to reach end of chain.
 +      void __iomem *chain_end = _base_get_chain(ioc,
 +                      cmd_credit + 1,
 +                      ioc->facts.MaxChainDepth);
 +      return chain_end + (smid * 64 * 1024);
 +}
 +
 +/**
 + * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 + *            Host buffer Physical address for the provided smid.
 + *            (Each smid can have 64K starts from 17024)
 + *
 + * @ioc: per adapter object
 + * @smid: system request message index
 + *
 + * @Returns - Pointer to buffer location in BAR0.
 + */
 +static phys_addr_t
 +_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 +{
 +      u16 cmd_credit = ioc->facts.RequestCredit + 1;
 +      phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
 +                      cmd_credit + 1,
 +                      ioc->facts.MaxChainDepth);
 +      return chain_end_phys + (smid * 64 * 1024);
 +}
 +
 +/**
 + * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
 + *                    lookup list and Provides chain_buffer
 + *                    address for the matching dma address.
 + *                    (Each smid can have 64K starts from 17024)
 + *
 + * @ioc: per adapter object
 + * @chain_buffer_dma: Chain buffer dma address.
 + *
 + * @Returns - Pointer to chain buffer. Or Null on Failure.
 + */
 +static void *
 +_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
 +              dma_addr_t chain_buffer_dma)
 +{
 +      u16 index;
 +
 +      for (index = 0; index < ioc->chain_depth; index++) {
 +              if (ioc->chain_lookup[index].chain_buffer_dma ==
 +                              chain_buffer_dma)
 +                      return ioc->chain_lookup[index].chain_buffer;
 +      }
 +      pr_info(MPT3SAS_FMT
 +          "Provided chain_buffer_dma address is not in the lookup list\n",
 +          ioc->name);
 +      return NULL;
 +}
 +
 +/**
 + * _clone_sg_entries -        MPI EP's scsiio and config requests
 + *                    are handled here. Base function for
 + *                    double buffering, before submitting
 + *                    the requests.
 + *
 + * @ioc: per adapter object.
 + * @mpi_request: mf request pointer.
 + * @smid: system request message index.
 + *
 + * @Returns: Nothing.
 + */
 +static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
 +              void *mpi_request, u16 smid)
 +{
 +      Mpi2SGESimple32_t *sgel, *sgel_next;
 +      u32  sgl_flags, sge_chain_count = 0;
 +      bool is_write = 0;
 +      u16 i = 0;
 +      void __iomem *buffer_iomem;
 +      phys_addr_t buffer_iomem_phys;
 +      void __iomem *buff_ptr;
 +      phys_addr_t buff_ptr_phys;
 +      void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
 +      void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
 +      phys_addr_t dst_addr_phys;
 +      MPI2RequestHeader_t *request_hdr;
 +      struct scsi_cmnd *scmd;
 +      struct scatterlist *sg_scmd = NULL;
 +      int is_scsiio_req = 0;
 +
 +      request_hdr = (MPI2RequestHeader_t *) mpi_request;
 +
 +      if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
 +              Mpi25SCSIIORequest_t *scsiio_request =
 +                      (Mpi25SCSIIORequest_t *)mpi_request;
 +              sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
 +              is_scsiio_req = 1;
 +      } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
 +              Mpi2ConfigRequest_t  *config_req =
 +                      (Mpi2ConfigRequest_t *)mpi_request;
 +              sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
 +      } else
 +              return;
 +
 +      /* From smid we can get scsi_cmd, once we have sg_scmd,
 +       * we just need to get sg_virt and sg_next to get virtual
 +       * address associated with sgel->Address.
 +       */
 +
 +      if (is_scsiio_req) {
 +              /* Get scsi_cmd using smid */
 +              scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 +              if (scmd == NULL) {
 +                      pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
 +                      return;
 +              }
 +
 +              /* Get sg_scmd from scmd provided */
 +              sg_scmd = scsi_sglist(scmd);
 +      }
 +
 +      /*
 +       * 0 - 255      System register
 +       * 256 - 4352   MPI Frame. (This is based on maxCredit 32)
 +       * 4352 - 4864  Reply_free pool (512 byte is reserved
 +       *              considering maxCredit 32. Reply need extra
 +       *              room, for mCPU case kept four times of
 +       *              maxCredit).
 +       * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
 +       *              128 byte size = 12288)
 +       * 17152 - x    Host buffer mapped with smid.
 +       *              (Each smid can have 64K Max IO.)
 +       * BAR0+Last 1K MSIX Addr and Data
 +       * Total size in use 2113664 bytes of 4MB BAR0
 +       */
 +
 +      buffer_iomem = _base_get_buffer_bar0(ioc, smid);
 +      buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
 +
 +      buff_ptr = buffer_iomem;
 +      buff_ptr_phys = buffer_iomem_phys;
 +      WARN_ON(buff_ptr_phys > U32_MAX);
 +
 +      if (sgel->FlagsLength &
 +                      (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
 +              is_write = 1;
 +
 +      for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
 +
 +              sgl_flags = (sgel->FlagsLength >> MPI2_SGE_FLAGS_SHIFT);
 +
 +              switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
 +              case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
 +                      /*
 +                       * Helper function which on passing
 +                       * chain_buffer_dma returns chain_buffer. Get
 +                       * the virtual address for sgel->Address
 +                       */
 +                      sgel_next =
 +                              _base_get_chain_buffer_dma_to_chain_buffer(ioc,
 +                                              sgel->Address);
 +                      if (sgel_next == NULL)
 +                              return;
 +                      /*
 +                       * This is copying 128 byte chain
 +                       * frame (not a host buffer)
 +                       */
 +                      dst_chain_addr[sge_chain_count] =
 +                              _base_get_chain(ioc,
 +                                      smid, sge_chain_count);
 +                      src_chain_addr[sge_chain_count] =
 +                                              (void *) sgel_next;
 +                      dst_addr_phys = _base_get_chain_phys(ioc,
 +                                              smid, sge_chain_count);
 +                      WARN_ON(dst_addr_phys > U32_MAX);
 +                      sgel->Address = (u32)dst_addr_phys;
 +                      sgel = sgel_next;
 +                      sge_chain_count++;
 +                      break;
 +              case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
 +                      if (is_write) {
 +                              if (is_scsiio_req) {
 +                                      _base_clone_to_sys_mem(buff_ptr,
 +                                          sg_virt(sg_scmd),
 +                                          (sgel->FlagsLength & 0x00ffffff));
 +                                      /*
 +                                       * FIXME: this relies on a zero
 +                                       * PCI mem_offset.
 +                                       */
 +                                      sgel->Address = (u32)buff_ptr_phys;
 +                              } else {
 +                                      _base_clone_to_sys_mem(buff_ptr,
 +                                          ioc->config_vaddr,
 +                                          (sgel->FlagsLength & 0x00ffffff));
 +                                      sgel->Address = (u32)buff_ptr_phys;
 +                              }
 +                      }
 +                      buff_ptr += (sgel->FlagsLength & 0x00ffffff);
 +                      buff_ptr_phys += (sgel->FlagsLength & 0x00ffffff);
 +                      if ((sgel->FlagsLength &
 +                          (MPI2_SGE_FLAGS_END_OF_BUFFER
 +                                      << MPI2_SGE_FLAGS_SHIFT)))
 +                              goto eob_clone_chain;
 +                      else {
 +                              /*
 +                               * Every single element in MPT will have
 +                               * associated sg_next. Better to sanity-check that
 +                               * sg_next is not NULL, but it will be a bug
 +                               * if it is null.
 +                               */
 +                              if (is_scsiio_req) {
 +                                      sg_scmd = sg_next(sg_scmd);
 +                                      if (sg_scmd)
 +                                              sgel++;
 +                                      else
 +                                              goto eob_clone_chain;
 +                              }
 +                      }
 +                      break;
 +              }
 +      }
 +
 +eob_clone_chain:
 +      for (i = 0; i < sge_chain_count; i++) {
 +              if (is_scsiio_req)
 +                      _base_clone_to_sys_mem(dst_chain_addr[i],
 +                              src_chain_addr[i], ioc->request_sz);
 +      }
 +}
 +
  /**
   *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
   * @arg: input argument, used to derive ioc
@@@ -1231,7 -875,7 +1231,7 @@@ _base_async_event(struct MPT3SAS_ADAPTE
        ack_request->EventContext = mpi_reply->EventContext;
        ack_request->VF_ID = 0;  /* TODO */
        ack_request->VP_ID = 0;
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
  
   out:
  
@@@ -1431,10 -1075,6 +1431,10 @@@ _base_interrupt(int irq, void *bus_id
                                    0 : ioc->reply_free_host_index + 1;
                                ioc->reply_free[ioc->reply_free_host_index] =
                                    cpu_to_le32(reply);
 +                              if (ioc->is_mcpu_endpoint)
 +                                      _base_clone_reply_to_sys_mem(ioc,
 +                                              cpu_to_le32(reply),
 +                                              ioc->reply_free_host_index);
                                writel(ioc->reply_free_host_index,
                                    &ioc->chip->ReplyFreeHostIndex);
                        }
@@@ -2574,9 -2214,6 +2574,9 @@@ _base_config_dma_addressing(struct MPT3
        struct sysinfo s;
        u64 consistent_dma_mask;
  
 +      if (ioc->is_mcpu_endpoint)
 +              goto try_32bit;
 +
        if (ioc->dma_mask)
                consistent_dma_mask = DMA_BIT_MASK(64);
        else
                }
        }
  
 + try_32bit:
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
            && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
                ioc->base_add_sg_single = &_base_add_sg_single_32;
@@@ -2774,8 -2410,11 +2774,11 @@@ _base_assign_reply_queues(struct MPT3SA
                                continue;
                        }
  
-                       for_each_cpu(cpu, mask)
+                       for_each_cpu_and(cpu, mask, cpu_online_mask) {
+                               if (cpu >= ioc->cpu_msix_table_sz)
+                                       break;
                                ioc->cpu_msix_table[cpu] = reply_q->msix_index;
+                       }
                }
                return;
        }
@@@ -2942,7 -2581,7 +2945,7 @@@ mpt3sas_base_map_resources(struct MPT3S
        u32 pio_sz;
        int i, r = 0;
        u64 pio_chip = 0;
 -      u64 chip_phys = 0;
 +      phys_addr_t chip_phys = 0;
        struct adapter_reply_queue *reply_q;
  
        dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
                        if (memap_sz)
                                continue;
                        ioc->chip_phys = pci_resource_start(pdev, i);
 -                      chip_phys = (u64)ioc->chip_phys;
 +                      chip_phys = ioc->chip_phys;
                        memap_sz = pci_resource_len(pdev, i);
                        ioc->chip = ioremap(ioc->chip_phys, memap_sz);
                }
                    "IO-APIC enabled"),
                    pci_irq_vector(ioc->pdev, reply_q->msix_index));
  
 -      pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
 -          ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
 +      pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
 +          ioc->name, &chip_phys, ioc->chip, memap_sz);
        pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
            ioc->name, (unsigned long long)pio_chip, pio_sz);
  
@@@ -3321,29 -2960,6 +3324,29 @@@ mpt3sas_base_free_smid(struct MPT3SAS_A
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  }
  
 +/**
 + * _base_mpi_ep_writeq - 32 bit write to MMIO
 + * @b: data payload
 + * @addr: address in MMIO space
 + * @writeq_lock: spin lock
 + *
 + * This special handling for MPI EP to take care of 32 bit
 + * environment where its not quarenteed to send the entire word
 + * in one transfer.
 + */
 +static inline void
 +_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
 +                                      spinlock_t *writeq_lock)
 +{
 +      unsigned long flags;
 +      __u64 data_out = cpu_to_le64(b);
 +
 +      spin_lock_irqsave(writeq_lock, flags);
 +      writel((u32)(data_out), addr);
 +      writel((u32)(data_out >> 32), (addr + 4));
 +      spin_unlock_irqrestore(writeq_lock, flags);
 +}
 +
  /**
   * _base_writeq - 64 bit write to MMIO
   * @ioc: per adapter object
@@@ -3365,40 -2981,16 +3368,40 @@@ _base_writeq(__u64 b, volatile void __i
  static inline void
  _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
  {
 -      unsigned long flags;
 -      __u64 data_out = cpu_to_le64(b);
 -
 -      spin_lock_irqsave(writeq_lock, flags);
 -      writel((u32)(data_out), addr);
 -      writel((u32)(data_out >> 32), (addr + 4));
 -      spin_unlock_irqrestore(writeq_lock, flags);
 +      _base_mpi_ep_writeq(b, addr, writeq_lock);
  }
  #endif
  
 +/**
 + * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
 + * @ioc: per adapter object
 + * @smid: system request message index
 + * @handle: device handle
 + *
 + * Return nothing.
 + */
 +static void
 +_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
 +{
 +      Mpi2RequestDescriptorUnion_t descriptor;
 +      u64 *request = (u64 *)&descriptor;
 +      void *mpi_req_iomem;
 +      __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
 +
 +      _clone_sg_entries(ioc, (void *) mfp, smid);
 +      mpi_req_iomem = (void *)ioc->chip +
 +                      MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
 +      _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
 +                                      ioc->request_sz);
 +      descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
 +      descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
 +      descriptor.SCSIIO.SMID = cpu_to_le16(smid);
 +      descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
 +      descriptor.SCSIIO.LMID = 0;
 +      _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 +          &ioc->scsi_lookup_lock);
 +}
 +
  /**
   * _base_put_smid_scsi_io - send SCSI_IO request to firmware
   * @ioc: per adapter object
@@@ -3424,15 -3016,15 +3427,15 @@@ _base_put_smid_scsi_io(struct MPT3SAS_A
  }
  
  /**
 - * _base_put_smid_fast_path - send fast path request to firmware
 + * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
   * @ioc: per adapter object
   * @smid: system request message index
   * @handle: device handle
   *
   * Return nothing.
   */
 -static void
 -_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 +void
 +mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        u16 handle)
  {
        Mpi2RequestDescriptorUnion_t descriptor;
  }
  
  /**
 - * _base_put_smid_hi_priority - send Task Management request to firmware
 + * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
   * @ioc: per adapter object
   * @smid: system request message index
   * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
   * Return nothing.
   */
 -static void
 -_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 +void
 +mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        u16 msix_task)
  {
        Mpi2RequestDescriptorUnion_t descriptor;
 -      u64 *request = (u64 *)&descriptor;
 +      void *mpi_req_iomem;
 +      u64 *request;
 +
 +      if (ioc->is_mcpu_endpoint) {
 +              MPI2RequestHeader_t *request_hdr;
 +
 +              __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
 +
 +              request_hdr = (MPI2RequestHeader_t *)mfp;
 +              /* TBD 256 is offset within sys register. */
 +              mpi_req_iomem = (void *)ioc->chip + MPI_FRAME_START_OFFSET
 +                                      + (smid * ioc->request_sz);
 +              _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
 +                                                      ioc->request_sz);
 +      }
 +
 +      request = (u64 *)&descriptor;
  
        descriptor.HighPriority.RequestFlags =
            MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
        descriptor.HighPriority.SMID = cpu_to_le16(smid);
        descriptor.HighPriority.LMID = 0;
        descriptor.HighPriority.Reserved1 = 0;
 -      _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 -          &ioc->scsi_lookup_lock);
 +      if (ioc->is_mcpu_endpoint)
 +              _base_mpi_ep_writeq(*request,
 +                              &ioc->chip->RequestDescriptorPostLow,
 +                              &ioc->scsi_lookup_lock);
 +      else
 +              _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 +                  &ioc->scsi_lookup_lock);
  }
  
  /**
 - * _base_put_smid_nvme_encap - send NVMe encapsulated request to
 + * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
   *  firmware
   * @ioc: per adapter object
   * @smid: system request message index
   *
   * Return nothing.
   */
 -static void
 -_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 +void
 +mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  {
        Mpi2RequestDescriptorUnion_t descriptor;
        u64 *request = (u64 *)&descriptor;
  }
  
  /**
 - * _base_put_smid_default - Default, primarily used for config pages
 + * mpt3sas_base_put_smid_default - Default, primarily used for config pages
   * @ioc: per adapter object
   * @smid: system request message index
   *
   * Return nothing.
   */
 -static void
 -_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 +void
 +mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  {
        Mpi2RequestDescriptorUnion_t descriptor;
 -      u64 *request = (u64 *)&descriptor;
 +      void *mpi_req_iomem;
 +      u64 *request;
 +      MPI2RequestHeader_t *request_hdr;
 +
 +      if (ioc->is_mcpu_endpoint) {
 +              __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
  
 +              request_hdr = (MPI2RequestHeader_t *)mfp;
 +
 +              _clone_sg_entries(ioc, (void *) mfp, smid);
 +              /* TBD 256 is offset within sys register */
 +              mpi_req_iomem = (void *)ioc->chip +
 +                      MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
 +              _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
 +                                                      ioc->request_sz);
 +      }
 +      request = (u64 *)&descriptor;
        descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
        descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
        descriptor.Default.SMID = cpu_to_le16(smid);
        descriptor.Default.LMID = 0;
        descriptor.Default.DescriptorTypeDependent = 0;
 -      _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 -          &ioc->scsi_lookup_lock);
 -}
 -
 -/**
 -* _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
 -*   Atomic Request Descriptor
 -* @ioc: per adapter object
 -* @smid: system request message index
 -* @handle: device handle, unused in this function, for function type match
 -*
 -* Return nothing.
 -*/
 -static void
 -_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 -      u16 handle)
 -{
 -      Mpi26AtomicRequestDescriptor_t descriptor;
 -      u32 *request = (u32 *)&descriptor;
 -
 -      descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
 -      descriptor.MSIxIndex = _base_get_msix_index(ioc);
 -      descriptor.SMID = cpu_to_le16(smid);
 -
 -      writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
 -}
 -
 -/**
 - * _base_put_smid_fast_path_atomic - send fast path request to firmware
 - * using Atomic Request Descriptor
 - * @ioc: per adapter object
 - * @smid: system request message index
 - * @handle: device handle, unused in this function, for function type match
 - * Return nothing
 - */
 -static void
 -_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 -      u16 handle)
 -{
 -      Mpi26AtomicRequestDescriptor_t descriptor;
 -      u32 *request = (u32 *)&descriptor;
 -
 -      descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
 -      descriptor.MSIxIndex = _base_get_msix_index(ioc);
 -      descriptor.SMID = cpu_to_le16(smid);
 -
 -      writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
 -}
 -
 -/**
 - * _base_put_smid_hi_priority_atomic - send Task Management request to
 - * firmware using Atomic Request Descriptor
 - * @ioc: per adapter object
 - * @smid: system request message index
 - * @msix_task: msix_task will be same as msix of IO incase of task abort else 0
 - *
 - * Return nothing.
 - */
 -static void
 -_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 -      u16 msix_task)
 -{
 -      Mpi26AtomicRequestDescriptor_t descriptor;
 -      u32 *request = (u32 *)&descriptor;
 -
 -      descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
 -      descriptor.MSIxIndex = msix_task;
 -      descriptor.SMID = cpu_to_le16(smid);
 -
 -      writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
 -}
 -
 -/**
 - * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to
 - *   firmware using Atomic Request Descriptor
 - * @ioc: per adapter object
 - * @smid: system request message index
 - *
 - * Return nothing.
 - */
 -static void
 -_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 -{
 -      Mpi26AtomicRequestDescriptor_t descriptor;
 -      u32 *request = (u32 *)&descriptor;
 -
 -      descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
 -      descriptor.MSIxIndex = _base_get_msix_index(ioc);
 -      descriptor.SMID = cpu_to_le16(smid);
 -
 -      writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
 -}
 -
 -/**
 - * _base_put_smid_default - Default, primarily used for config pages
 - * use Atomic Request Descriptor
 - * @ioc: per adapter object
 - * @smid: system request message index
 - *
 - * Return nothing.
 - */
 -static void
 -_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 -{
 -      Mpi26AtomicRequestDescriptor_t descriptor;
 -      u32 *request = (u32 *)&descriptor;
 -
 -      descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
 -      descriptor.MSIxIndex = _base_get_msix_index(ioc);
 -      descriptor.SMID = cpu_to_le16(smid);
 -
 -      writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
 +      if (ioc->is_mcpu_endpoint)
 +              _base_mpi_ep_writeq(*request,
 +                              &ioc->chip->RequestDescriptorPostLow,
 +                              &ioc->scsi_lookup_lock);
 +      else
 +              _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
 +                              &ioc->scsi_lookup_lock);
  }
  
  /**
@@@ -4229,21 -3890,17 +4232,21 @@@ _base_allocate_memory_pools(struct MPT3
                sg_tablesize = min_t(unsigned short, sg_tablesize,
                   MPT_KDUMP_MIN_PHYS_SEGMENTS);
  
 -      if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
 -              sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
 -      else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
 -              sg_tablesize = min_t(unsigned short, sg_tablesize,
 -                                    SG_MAX_SEGMENTS);
 -              pr_warn(MPT3SAS_FMT
 -               "sg_tablesize(%u) is bigger than kernel"
 -               " defined SG_CHUNK_SIZE(%u)\n", ioc->name,
 -               sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
 +      if (ioc->is_mcpu_endpoint)
 +              ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
 +      else {
 +              if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
 +                      sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
 +              else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
 +                      sg_tablesize = min_t(unsigned short, sg_tablesize,
 +                                      SG_MAX_SEGMENTS);
 +                      pr_warn(MPT3SAS_FMT
 +                              "sg_tablesize(%u) is bigger than kernel "
 +                              "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
 +                              sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
 +              }
 +              ioc->shost->sg_tablesize = sg_tablesize;
        }
 -      ioc->shost->sg_tablesize = sg_tablesize;
  
        ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
                (facts->RequestCredit / 4));
        /* reply free queue sizing - taking into account for 64 FW events */
        ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
  
 -      /* calculate reply descriptor post queue depth */
 -      ioc->reply_post_queue_depth = ioc->hba_queue_depth +
 -                              ioc->reply_free_queue_depth +  1 ;
 -      /* align the reply post queue on the next 16 count boundary */
 -      if (ioc->reply_post_queue_depth % 16)
 -              ioc->reply_post_queue_depth += 16 -
 -              (ioc->reply_post_queue_depth % 16);
 +      /* mCPU manage single counters for simplicity */
 +      if (ioc->is_mcpu_endpoint)
 +              ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
 +      else {
 +              /* calculate reply descriptor post queue depth */
 +              ioc->reply_post_queue_depth = ioc->hba_queue_depth +
 +                      ioc->reply_free_queue_depth +  1;
 +              /* align the reply post queue on the next 16 count boundary */
 +              if (ioc->reply_post_queue_depth % 16)
 +                      ioc->reply_post_queue_depth += 16 -
 +                              (ioc->reply_post_queue_depth % 16);
 +      }
  
        if (ioc->reply_post_queue_depth >
            facts->MaxReplyDescriptorPostQueueDepth) {
@@@ -5137,7 -4789,7 +5140,7 @@@ mpt3sas_base_sas_iounit_control(struct 
            mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
                ioc->ioc_link_reset_in_progress = 1;
        init_completion(&ioc->base_cmds.done);
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->base_cmds.done,
            msecs_to_jiffies(10000));
        if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@@ -5237,7 -4889,7 +5240,7 @@@ mpt3sas_base_scsi_enclosure_processor(s
        ioc->base_cmds.smid = smid;
        memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
        init_completion(&ioc->base_cmds.done);
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->base_cmds.done,
            msecs_to_jiffies(10000));
        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@@ -5422,6 -5074,8 +5425,6 @@@ _base_get_ioc_facts(struct MPT3SAS_ADAP
        if ((facts->IOCCapabilities &
              MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
                ioc->rdpq_array_capable = 1;
 -      if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
 -              ioc->atomic_desc_capable = 1;
        facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
        facts->IOCRequestFrameSize =
            le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@@ -5663,7 -5317,7 +5666,7 @@@ _base_send_port_enable(struct MPT3SAS_A
        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
  
        init_completion(&ioc->port_enable_cmds.done);
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
        if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
@@@ -5726,7 -5380,7 +5729,7 @@@ mpt3sas_port_enable(struct MPT3SAS_ADAP
        memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
        mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
  
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
        return 0;
  }
  
@@@ -5845,7 -5499,7 +5848,7 @@@ _base_event_notification(struct MPT3SAS
                mpi_request->EventMasks[i] =
                    cpu_to_le32(ioc->event_masks[i]);
        init_completion(&ioc->base_cmds.done);
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
@@@ -6165,12 -5819,8 +6168,12 @@@ _base_make_ioc_operational(struct MPT3S
        /* initialize Reply Free Queue */
        for (i = 0, reply_address = (u32)ioc->reply_dma ;
            i < ioc->reply_free_queue_depth ; i++, reply_address +=
 -          ioc->reply_sz)
 +          ioc->reply_sz) {
                ioc->reply_free[i] = cpu_to_le32(reply_address);
 +              if (ioc->is_mcpu_endpoint)
 +                      _base_clone_reply_to_sys_mem(ioc,
 +                                      (__le32)reply_address, i);
 +      }
  
        /* initialize reply queues */
        if (ioc->is_driver_loading)
@@@ -6359,10 -6009,20 +6362,10 @@@ mpt3sas_base_attach(struct MPT3SAS_ADAP
                break;
        }
  
 -      if (ioc->atomic_desc_capable) {
 -              ioc->put_smid_default = &_base_put_smid_default_atomic;
 -              ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
 -              ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
 -              ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
 -              ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
 -      } else {
 -              ioc->put_smid_default = &_base_put_smid_default;
 +      if (ioc->is_mcpu_endpoint)
 +              ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
 +      else
                ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
 -              ioc->put_smid_fast_path = &_base_put_smid_fast_path;
 -              ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
 -              ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
 -      }
 -
  
        /*
         * These function pointers for other requests that don't
@@@ -6637,14 -6297,14 +6640,14 @@@ _base_reset_handler(struct MPT3SAS_ADAP
  }
  
  /**
-  * _wait_for_commands_to_complete - reset controller
+  * mpt3sas_wait_for_commands_to_complete - reset controller
   * @ioc: Pointer to MPT_ADAPTER structure
   *
   * This function is waiting 10s for all pending commands to complete
   * prior to putting controller in reset.
   */
static void
- _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
+ void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
  {
        u32 ioc_state;
  
@@@ -6717,7 -6377,7 +6720,7 @@@ mpt3sas_base_hard_reset_handler(struct 
                        is_fault = 1;
        }
        _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
-       _wait_for_commands_to_complete(ioc);
+       mpt3sas_wait_for_commands_to_complete(ioc);
        _base_mask_interrupts(ioc);
        r = _base_make_ioc_ready(ioc, type);
        if (r)
index 4de0251e158effa0bc776e7c4c2b0e5e816105b8,99ccf83b8c518c91b794a0311ce79d4cd41de795..ae36d8fb2f2bdab5f63de502d6cf6b9d2ab1b086
@@@ -95,8 -95,6 +95,8 @@@
  #define MPT_MIN_PHYS_SEGMENTS 16
  #define MPT_KDUMP_MIN_PHYS_SEGMENTS   32
  
 +#define MCPU_MAX_CHAINS_PER_IO        3
 +
  #ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
  #define MPT3SAS_SG_DEPTH              CONFIG_SCSI_MPT3SAS_MAX_SGE
  #else
  #define MPT3SAS_NVME_QUEUE_DEPTH      128
  #define MPT_NAME_LENGTH                       32      /* generic length of strings */
  #define MPT_STRING_LENGTH             64
 +#define MPI_FRAME_START_OFFSET                256
 +#define REPLY_FREE_POOL_SIZE          512 /*(32 maxcredix *4)*(4 times)*/
  
  #define MPT_MAX_CALLBACKS             32
  
@@@ -1103,7 -1099,7 +1103,7 @@@ struct MPT3SAS_ADAPTER 
        char            tmp_string[MPT_STRING_LENGTH];
        struct pci_dev  *pdev;
        Mpi2SystemInterfaceRegs_t __iomem *chip;
 -      resource_size_t chip_phys;
 +      phys_addr_t     chip_phys;
        int             logging_level;
        int             fwfault_debug;
        u8              ir_firmware;
        u16             config_page_sz;
        void            *config_page;
        dma_addr_t      config_page_dma;
 +      void            *config_vaddr;
  
        /* scsiio request */
        u16             hba_queue_depth;
        u32             ring_buffer_offset;
        u32             ring_buffer_sz;
        u8              is_warpdrive;
 +      u8              is_mcpu_endpoint;
        u8              hide_ir_msg;
        u8              mfg_pg10_hide_flag;
        u8              hide_drives;
        void            *device_remove_in_progress;
        u16             device_remove_in_progress_sz;
        u8              is_gen35_ioc;
 -      u8              atomic_desc_capable;
        PUT_SMID_IO_FP_HIP put_smid_scsi_io;
 -      PUT_SMID_IO_FP_HIP put_smid_fast_path;
 -      PUT_SMID_IO_FP_HIP put_smid_hi_priority;
 -      PUT_SMID_DEFAULT put_smid_default;
 -      PUT_SMID_DEFAULT put_smid_nvme_encap;
  
  };
  
@@@ -1395,12 -1394,6 +1395,12 @@@ void *mpt3sas_base_get_pcie_sgl(struct 
  dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
  void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
  
 +void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 +      u16 handle);
 +void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 +      u16 msix_task);
 +void mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid);
 +void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
  /* hi-priority queue */
  u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
  u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
@@@ -1440,6 -1433,9 +1440,9 @@@ void mpt3sas_base_update_missing_delay(
  
  int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
  
+ void
+ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
  
  /* scsih shared API */
  struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
index 50efccd73cb1a61378912bfebd720fcd5a352ea7,a1cb0236c550330a5de690462f5ac48c4a3e8bad..89be0170aef6d02ca3edfa93580a48e33f2d3ccb
@@@ -2679,7 -2679,7 +2679,7 @@@ mpt3sas_scsih_issue_tm(struct MPT3SAS_A
        int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
        mpt3sas_scsih_set_tm_flag(ioc, handle);
        init_completion(&ioc->tm_cmds.done);
 -      ioc->put_smid_hi_priority(ioc, smid, msix_task);
 +      mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
        wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
        if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
                pr_err(MPT3SAS_FMT "%s: timeout\n",
@@@ -2835,7 -2835,8 +2835,8 @@@ scsih_abort(struct scsi_cmnd *scmd
        _scsih_tm_display_info(ioc, scmd);
  
        sas_device_priv_data = scmd->device->hostdata;
-       if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+       if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+           ioc->remove_host) {
                sdev_printk(KERN_INFO, scmd->device,
                        "device been deleted! scmd(%p)\n", scmd);
                scmd->result = DID_NO_CONNECT << 16;
@@@ -2898,7 -2899,8 +2899,8 @@@ scsih_dev_reset(struct scsi_cmnd *scmd
        _scsih_tm_display_info(ioc, scmd);
  
        sas_device_priv_data = scmd->device->hostdata;
-       if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+       if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+           ioc->remove_host) {
                sdev_printk(KERN_INFO, scmd->device,
                        "device been deleted! scmd(%p)\n", scmd);
                scmd->result = DID_NO_CONNECT << 16;
@@@ -2961,7 -2963,8 +2963,8 @@@ scsih_target_reset(struct scsi_cmnd *sc
        _scsih_tm_display_info(ioc, scmd);
  
        sas_device_priv_data = scmd->device->hostdata;
-       if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+       if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+           ioc->remove_host) {
                starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
                        scmd);
                scmd->result = DID_NO_CONNECT << 16;
@@@ -3019,7 -3022,7 +3022,7 @@@ scsih_host_reset(struct scsi_cmnd *scmd
            ioc->name, scmd);
        scsi_print_command(scmd);
  
-       if (ioc->is_driver_loading) {
+       if (ioc->is_driver_loading || ioc->remove_host) {
                pr_info(MPT3SAS_FMT "Blocking the host reset\n",
                    ioc->name);
                r = FAILED;
@@@ -3638,7 -3641,7 +3641,7 @@@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTE
        mpi_request->DevHandle = cpu_to_le16(handle);
        mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
        set_bit(handle, ioc->device_remove_in_progress);
 -      ioc->put_smid_hi_priority(ioc, smid, 0);
 +      mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
        mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
  
  out:
@@@ -3739,7 -3742,7 +3742,7 @@@ _scsih_tm_tr_complete(struct MPT3SAS_AD
        mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
        mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
        mpi_request->DevHandle = mpi_request_tm->DevHandle;
 -      ioc->put_smid_default(ioc, smid_sas_ctrl);
 +      mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
  
        return _scsih_check_for_pending_tm(ioc, smid);
  }
@@@ -3834,7 -3837,7 +3837,7 @@@ _scsih_tm_tr_volume_send(struct MPT3SAS
        mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
        mpi_request->DevHandle = cpu_to_le16(handle);
        mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
 -      ioc->put_smid_hi_priority(ioc, smid, 0);
 +      mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
  }
  
  /**
@@@ -3926,7 -3929,7 +3929,7 @@@ _scsih_issue_delayed_event_ack(struct M
        ack_request->EventContext = event_context;
        ack_request->VF_ID = 0;  /* TODO */
        ack_request->VP_ID = 0;
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
  }
  
  /**
@@@ -3983,7 -3986,7 +3986,7 @@@ _scsih_issue_delayed_sas_io_unit_ctrl(s
        mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
        mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
        mpi_request->DevHandle = handle;
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
  }
  
  /**
@@@ -4453,7 -4456,7 +4456,7 @@@ _scsih_flush_running_cmds(struct MPT3SA
                st = scsi_cmd_priv(scmd);
                mpt3sas_base_clear_st(ioc, st);
                scsi_dma_unmap(scmd);
-               if (ioc->pci_error_recovery)
+               if (ioc->pci_error_recovery || ioc->remove_host)
                        scmd->result = DID_NO_CONNECT << 16;
                else
                        scmd->result = DID_RESET << 16;
@@@ -4712,12 -4715,12 +4715,12 @@@ scsih_qcmd(struct Scsi_Host *shost, str
                if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
                        mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
                            MPI25_SCSIIO_IOFLAGS_FAST_PATH);
 -                      ioc->put_smid_fast_path(ioc, smid, handle);
 +                      mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
                } else
                        ioc->put_smid_scsi_io(ioc, smid,
                            le16_to_cpu(mpi_request->DevHandle));
        } else
 -              ioc->put_smid_default(ioc, smid);
 +              mpt3sas_base_put_smid_default(ioc, smid);
        return 0;
  
   out:
@@@ -7606,7 -7609,7 +7609,7 @@@ _scsih_ir_fastpath(struct MPT3SAS_ADAPT
            handle, phys_disk_num));
  
        init_completion(&ioc->scsih_cmds.done);
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
  
        if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@@ -9697,7 -9700,7 +9700,7 @@@ _scsih_ir_shutdown(struct MPT3SAS_ADAPT
        if (!ioc->hide_ir_msg)
                pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
        init_completion(&ioc->scsih_cmds.done);
 -      ioc->put_smid_default(ioc, smid);
 +      mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
  
        if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@@ -9739,6 -9742,10 +9742,10 @@@ static void scsih_remove(struct pci_de
        unsigned long flags;
  
        ioc->remove_host = 1;
+       mpt3sas_wait_for_commands_to_complete(ioc);
+       _scsih_flush_running_cmds(ioc);
        _scsih_fw_event_cleanup_queue(ioc);
  
        spin_lock_irqsave(&ioc->fw_event_lock, flags);
@@@ -9815,6 -9822,10 +9822,10 @@@ scsih_shutdown(struct pci_dev *pdev
        unsigned long flags;
  
        ioc->remove_host = 1;
+       mpt3sas_wait_for_commands_to_complete(ioc);
+       _scsih_flush_running_cmds(ioc);
        _scsih_fw_event_cleanup_queue(ioc);
  
        spin_lock_irqsave(&ioc->fw_event_lock, flags);
@@@ -10335,7 -10346,6 +10346,7 @@@ _scsih_determine_hba_mpi_version(struc
        case MPI2_MFGPAGE_DEVID_SAS2308_1:
        case MPI2_MFGPAGE_DEVID_SAS2308_2:
        case MPI2_MFGPAGE_DEVID_SAS2308_3:
 +      case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
                return MPI2_VERSION;
        case MPI25_MFGPAGE_DEVID_SAS3004:
        case MPI25_MFGPAGE_DEVID_SAS3008:
@@@ -10413,18 -10423,11 +10424,18 @@@ _scsih_probe(struct pci_dev *pdev, cons
                ioc->hba_mpi_version_belonged = hba_mpi_version;
                ioc->id = mpt2_ids++;
                sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
 -              if (pdev->device == MPI2_MFGPAGE_DEVID_SSS6200) {
 +              switch (pdev->device) {
 +              case MPI2_MFGPAGE_DEVID_SSS6200:
                        ioc->is_warpdrive = 1;
                        ioc->hide_ir_msg = 1;
 -              } else
 +                      break;
 +              case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
 +                      ioc->is_mcpu_endpoint = 1;
 +                      break;
 +              default:
                        ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
 +                      break;
 +              }
                break;
        case MPI25_VERSION:
        case MPI26_VERSION:
        shost->transportt = mpt3sas_transport_template;
        shost->unique_id = ioc->id;
  
 -      if (max_sectors != 0xFFFF) {
 -              if (max_sectors < 64) {
 -                      shost->max_sectors = 64;
 -                      pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
 -                          "for max_sectors, range is 64 to 32767. Assigning "
 -                          "value of 64.\n", ioc->name, max_sectors);
 -              } else if (max_sectors > 32767) {
 -                      shost->max_sectors = 32767;
 -                      pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
 -                          "for max_sectors, range is 64 to 32767. Assigning "
 -                          "default value of 32767.\n", ioc->name,
 -                          max_sectors);
 -              } else {
 -                      shost->max_sectors = max_sectors & 0xFFFE;
 -                      pr_info(MPT3SAS_FMT
 +      if (ioc->is_mcpu_endpoint) {
 +              /* mCPU MPI support 64K max IO */
 +              shost->max_sectors = 128;
 +              pr_info(MPT3SAS_FMT
                                "The max_sectors value is set to %d\n",
                                ioc->name, shost->max_sectors);
 +      } else {
 +              if (max_sectors != 0xFFFF) {
 +                      if (max_sectors < 64) {
 +                              shost->max_sectors = 64;
 +                              pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
 +                                  "for max_sectors, range is 64 to 32767. " \
 +                                  "Assigning value of 64.\n", \
 +                                  ioc->name, max_sectors);
 +                      } else if (max_sectors > 32767) {
 +                              shost->max_sectors = 32767;
 +                              pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
 +                                  "for max_sectors, range is 64 to 32767." \
 +                                  "Assigning default value of 32767.\n", \
 +                                  ioc->name, max_sectors);
 +                      } else {
 +                              shost->max_sectors = max_sectors & 0xFFFE;
 +                              pr_info(MPT3SAS_FMT
 +                                      "The max_sectors value is set to %d\n",
 +                                      ioc->name, shost->max_sectors);
 +                      }
                }
        }
 -
        /* register EEDP capabilities with SCSI layer */
        if (prot_mask > 0)
                scsi_host_set_prot(shost, prot_mask);
        snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
            "fw_event_%s%d", ioc->driver_name, ioc->id);
        ioc->firmware_event_thread = alloc_ordered_workqueue(
-           ioc->firmware_event_name, WQ_MEM_RECLAIM);
+           ioc->firmware_event_name, 0);
        if (!ioc->firmware_event_thread) {
                pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
@@@ -10861,8 -10856,6 +10872,8 @@@ static const struct pci_device_id mpt3s
                PCI_ANY_ID, PCI_ANY_ID },
        { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
                PCI_ANY_ID, PCI_ANY_ID },
 +      { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP,
 +              PCI_ANY_ID, PCI_ANY_ID },
        /* SSS6200 */
        { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
                PCI_ANY_ID, PCI_ANY_ID },
index 667d7697ba01d6a63da5b28ce6953d7cb9465e09,03c772c223fa9efdfdd471216ddbc515b299e0de..d09afe1b567d9dd2cbfd383fb771071e61d61609
@@@ -87,7 -87,7 +87,7 @@@ static void qedi_process_text_resp(stru
  {
        struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
 -      struct iscsi_task_context *task_ctx;
 +      struct e4_iscsi_task_context *task_ctx;
        struct iscsi_text_rsp *resp_hdr_ptr;
        struct iscsi_text_response_hdr *cqe_text_response;
        struct qedi_cmd *cmd;
@@@ -260,7 -260,7 +260,7 @@@ static void qedi_process_login_resp(str
  {
        struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
 -      struct iscsi_task_context *task_ctx;
 +      struct e4_iscsi_task_context *task_ctx;
        struct iscsi_login_rsp *resp_hdr_ptr;
        struct iscsi_login_response_hdr *cqe_login_response;
        struct qedi_cmd *cmd;
@@@ -326,7 -326,7 +326,7 @@@ static void qedi_get_rq_bdq_buf(struct 
                  (qedi->bdq_prod_idx % qedi->rq_num_entries));
  
        /* Obtain buffer address from rqe_opaque */
 -      idx = cqe->rqe_opaque.lo;
 +      idx = cqe->rqe_opaque;
        if (idx > (QEDI_BDQ_NUM - 1)) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
        }
  
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 -                "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
 -                cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
 +                "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
  
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
@@@ -362,7 -363,7 +362,7 @@@ static void qedi_put_rq_bdq_buf(struct 
        struct scsi_bd *pbl;
  
        /* Obtain buffer address from rqe_opaque */
 -      idx = cqe->rqe_opaque.lo;
 +      idx = cqe->rqe_opaque;
        if (idx > (QEDI_BDQ_NUM - 1)) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
                  pbl, pbl->address.hi, pbl->address.lo, idx);
 -      pbl->opaque.hi = 0;
 -      pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
 +      pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
 +      pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
 +      pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
 +      pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
  
        /* Increment producer to let f/w know we've handled the frame */
        qedi->bdq_prod_idx += count;
@@@ -762,6 -761,11 +762,11 @@@ static void qedi_process_cmd_cleanup_re
  
        iscsi_cid = cqe->conn_id;
        qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+       if (!qedi_conn) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+                         "icid not found 0x%x\n", cqe->conn_id);
+               return;
+       }
  
        /* Based on this itt get the corresponding qedi_cmd */
        spin_lock_bh(&qedi_conn->tmf_work_lock);
@@@ -1018,7 -1022,7 +1023,7 @@@ int qedi_send_iscsi_login(struct qedi_c
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;
        struct iscsi_task_params task_params;
 -      struct iscsi_task_context *fw_task_ctx;
 +      struct e4_iscsi_task_context *fw_task_ctx;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_login_req *login_hdr;
        struct scsi_sge *resp_sge = NULL;
                return -ENOMEM;
  
        fw_task_ctx =
 -           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 -      memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 +           (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
 +                                                             tid);
 +      memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
  
        qedi_cmd->task_id = tid;
  
@@@ -1121,7 -1124,7 +1126,7 @@@ int qedi_send_iscsi_logout(struct qedi_
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;
        struct iscsi_task_params task_params;
 -      struct iscsi_task_context *fw_task_ctx;
 +      struct e4_iscsi_task_context *fw_task_ctx;
        struct iscsi_logout *logout_hdr = NULL;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct qedi_cmd *qedi_cmd;
                return -ENOMEM;
  
        fw_task_ctx =
 -           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 -      memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 +           (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
 +                                                             tid);
 +      memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
  
        qedi_cmd->task_id = tid;
  
@@@ -1470,7 -1472,7 +1475,7 @@@ static int qedi_send_iscsi_tmf(struct q
        struct iscsi_tmf_request_hdr tmf_pdu_header;
        struct iscsi_task_params task_params;
        struct qedi_ctx *qedi = qedi_conn->qedi;
 -      struct iscsi_task_context *fw_task_ctx;
 +      struct e4_iscsi_task_context *fw_task_ctx;
        struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
        struct iscsi_task *ctask;
        struct iscsi_tm *tmf_hdr;
                return -ENOMEM;
  
        fw_task_ctx =
 -           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 -      memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 +           (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
 +                                                             tid);
 +      memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
  
        qedi_cmd->task_id = tid;
  
@@@ -1609,7 -1610,7 +1614,7 @@@ int qedi_send_iscsi_text(struct qedi_co
        struct scsi_sgl_task_params tx_sgl_task_params;
        struct scsi_sgl_task_params rx_sgl_task_params;
        struct iscsi_task_params task_params;
 -      struct iscsi_task_context *fw_task_ctx;
 +      struct e4_iscsi_task_context *fw_task_ctx;
        struct qedi_ctx *qedi = qedi_conn->qedi;
        struct iscsi_text *text_hdr;
        struct scsi_sge *req_sge = NULL;
                return -ENOMEM;
  
        fw_task_ctx =
 -           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 -      memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 +           (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
 +                                                             tid);
 +      memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
  
        qedi_cmd->task_id = tid;
  
@@@ -1710,7 -1710,7 +1715,7 @@@ int qedi_send_iscsi_nopout(struct qedi_
        struct scsi_sgl_task_params rx_sgl_task_params;
        struct iscsi_task_params task_params;
        struct qedi_ctx *qedi = qedi_conn->qedi;
 -      struct iscsi_task_context *fw_task_ctx;
 +      struct e4_iscsi_task_context *fw_task_ctx;
        struct iscsi_nopout *nopout_hdr;
        struct scsi_sge *resp_sge = NULL;
        struct qedi_cmd *qedi_cmd;
                return -ENOMEM;
  
        fw_task_ctx =
 -           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 -      memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 +           (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
 +                                                             tid);
 +      memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
  
        qedi_cmd->task_id = tid;
  
@@@ -2052,7 -2051,7 +2057,7 @@@ int qedi_iscsi_send_ioreq(struct iscsi_
        struct iscsi_task_params task_params;
        struct iscsi_conn_params conn_params;
        struct scsi_initiator_cmd_params cmd_params;
 -      struct iscsi_task_context *fw_task_ctx;
 +      struct e4_iscsi_task_context *fw_task_ctx;
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
        enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
                return -ENOMEM;
  
        fw_task_ctx =
 -           (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 -      memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 +           (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
 +                                                             tid);
 +      memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
  
        cmd->task_id = tid;
  
index e992f9d3ef000c5fffd7a0068efff15915850c96,8b637d1fe5a4164bdd0b3af2aaaa8d0b50a2257c..4da3592aec0f46e494ee203abad6378a18004645
@@@ -60,7 -60,7 +60,7 @@@ static int qedi_iscsi_event_cb(void *co
  {
        struct qedi_ctx *qedi;
        struct qedi_endpoint *qedi_ep;
 -      struct async_data *data;
 +      struct iscsi_eqe_data *data;
        int rval = 0;
  
        if (!context || !fw_handle) {
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
  
 -      data = (struct async_data *)fw_handle;
 +      data = (struct iscsi_eqe_data *)fw_handle;
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
 -                "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
 -                 data->cid, data->itid, data->error_code,
 -                 data->fw_debug_param);
 +                "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
 +                 data->icid, data->conn_id, data->error_code,
 +                 data->error_pdu_opcode_reserved);
  
 -      qedi_ep = qedi->ep_tbl[data->cid];
 +      qedi_ep = qedi->ep_tbl[data->icid];
  
        if (!qedi_ep) {
                QEDI_WARN(&qedi->dbg_ctx,
                          "Cannot process event, ep already disconnected, cid=0x%x\n",
 -                         data->cid);
 +                         data->icid);
                WARN_ON(1);
                return -ENODEV;
        }
@@@ -339,12 -339,12 +339,12 @@@ static int qedi_init_uio(struct qedi_ct
  static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
                                  struct qed_sb_info *sb_info, u16 sb_id)
  {
 -      struct status_block *sb_virt;
 +      struct status_block_e4 *sb_virt;
        dma_addr_t sb_phys;
        int ret;
  
        sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
 -                                   sizeof(struct status_block), &sb_phys,
 +                                   sizeof(struct status_block_e4), &sb_phys,
                                     GFP_KERNEL);
        if (!sb_virt) {
                QEDI_ERR(&qedi->dbg_ctx,
@@@ -858,6 -858,7 +858,6 @@@ static int qedi_set_iscsi_pf_param(stru
  
        qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
        qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
 -      qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
  
  err_alloc_mem:
        return rval;
@@@ -960,7 -961,7 +960,7 @@@ static bool qedi_process_completions(st
  {
        struct qedi_ctx *qedi = fp->qedi;
        struct qed_sb_info *sb_info = fp->sb_info;
 -      struct status_block *sb = sb_info->sb_virt;
 +      struct status_block_e4 *sb = sb_info->sb_virt;
        struct qedi_percpu_s *p = NULL;
        struct global_queue *que;
        u16 prod_idx;
@@@ -1016,7 -1017,7 +1016,7 @@@ static bool qedi_fp_has_work(struct qed
        struct qedi_ctx *qedi = fp->qedi;
        struct global_queue *que;
        struct qed_sb_info *sb_info = fp->sb_info;
 -      struct status_block *sb = sb_info->sb_virt;
 +      struct status_block_e4 *sb = sb_info->sb_virt;
        u16 prod_idx;
  
        barrier();
@@@ -1263,10 -1264,8 +1263,10 @@@ static int qedi_alloc_bdq(struct qedi_c
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
                          "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
                          pbl, pbl->address.hi, pbl->address.lo, i);
 -              pbl->opaque.hi = 0;
 -              pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
 +              pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
 +              pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
 +              pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
 +              pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
                pbl++;
        }
  
@@@ -1724,7 -1723,6 +1724,6 @@@ static ssize_t qedi_show_boot_eth_info(
  {
        struct qedi_ctx *qedi = data;
        struct nvm_iscsi_initiator *initiator;
-       char *str = buf;
        int rc = 1;
        u32 ipv6_en, dhcp_en, ip_len;
        struct nvm_iscsi_block *block;
  
        switch (type) {
        case ISCSI_BOOT_ETH_IP_ADDR:
-               rc = snprintf(str, ip_len, fmt, ip);
+               rc = snprintf(buf, ip_len, fmt, ip);
                break;
        case ISCSI_BOOT_ETH_SUBNET_MASK:
-               rc = snprintf(str, ip_len, fmt, sub);
+               rc = snprintf(buf, ip_len, fmt, sub);
                break;
        case ISCSI_BOOT_ETH_GATEWAY:
-               rc = snprintf(str, ip_len, fmt, gw);
+               rc = snprintf(buf, ip_len, fmt, gw);
                break;
        case ISCSI_BOOT_ETH_FLAGS:
-               rc = snprintf(str, 3, "%hhd\n",
+               rc = snprintf(buf, 3, "%hhd\n",
                              SYSFS_FLAG_FW_SEL_BOOT);
                break;
        case ISCSI_BOOT_ETH_INDEX:
-               rc = snprintf(str, 3, "0\n");
+               rc = snprintf(buf, 3, "0\n");
                break;
        case ISCSI_BOOT_ETH_MAC:
-               rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
+               rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
                break;
        case ISCSI_BOOT_ETH_VLAN:
-               rc = snprintf(str, 12, "%d\n",
+               rc = snprintf(buf, 12, "%d\n",
                              GET_FIELD2(initiator->generic_cont0,
                                         NVM_ISCSI_CFG_INITIATOR_VLAN));
                break;
        case ISCSI_BOOT_ETH_ORIGIN:
                if (dhcp_en)
-                       rc = snprintf(str, 3, "3\n");
+                       rc = snprintf(buf, 3, "3\n");
                break;
        default:
                rc = 0;
@@@ -1819,7 -1817,6 +1818,6 @@@ static ssize_t qedi_show_boot_ini_info(
  {
        struct qedi_ctx *qedi = data;
        struct nvm_iscsi_initiator *initiator;
-       char *str = buf;
        int rc;
        struct nvm_iscsi_block *block;
  
  
        switch (type) {
        case ISCSI_BOOT_INI_INITIATOR_NAME:
-               rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
-                             initiator->initiator_name.byte);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+                            initiator->initiator_name.byte);
                break;
        default:
                rc = 0;
@@@ -1860,7 -1857,6 +1858,6 @@@ static ssize_
  qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
                        char *buf, enum qedi_nvm_tgts idx)
  {
-       char *str = buf;
        int rc = 1;
        u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
        struct nvm_iscsi_block *block;
  
        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
-               rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
-                             block->target[idx].target_name.byte);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+                            block->target[idx].target_name.byte);
                break;
        case ISCSI_BOOT_TGT_IP_ADDR:
                if (ipv6_en)
-                       rc = snprintf(str, ip_len, "%pI6\n",
+                       rc = snprintf(buf, ip_len, "%pI6\n",
                                      block->target[idx].ipv6_addr.byte);
                else
-                       rc = snprintf(str, ip_len, "%pI4\n",
+                       rc = snprintf(buf, ip_len, "%pI4\n",
                                      block->target[idx].ipv4_addr.byte);
                break;
        case ISCSI_BOOT_TGT_PORT:
-               rc = snprintf(str, 12, "%d\n",
+               rc = snprintf(buf, 12, "%d\n",
                              GET_FIELD2(block->target[idx].generic_cont0,
                                         NVM_ISCSI_CFG_TARGET_TCP_PORT));
                break;
        case ISCSI_BOOT_TGT_LUN:
-               rc = snprintf(str, 22, "%.*d\n",
+               rc = snprintf(buf, 22, "%.*d\n",
                              block->target[idx].lun.value[1],
                              block->target[idx].lun.value[0]);
                break;
        case ISCSI_BOOT_TGT_CHAP_NAME:
-               rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
-                             chap_name);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+                            chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
-               rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
-                             chap_secret);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+                            chap_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
-               rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
-                             mchap_name);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+                            mchap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-               rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
-                             mchap_secret);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+                            mchap_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
-               rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+               rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
                break;
        case ISCSI_BOOT_TGT_NIC_ASSOC:
-               rc = snprintf(str, 3, "0\n");
+               rc = snprintf(buf, 3, "0\n");
                break;
        default:
                rc = 0;
@@@ -2303,8 -2299,8 +2300,8 @@@ static int __qedi_probe(struct pci_dev 
        }
  
  #ifdef CONFIG_DEBUG_FS
 -      qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
 -                         &qedi_dbg_fops);
 +      qedi_dbg_host_init(&qedi->dbg_ctx, qedi_debugfs_ops,
 +                         qedi_dbg_fops);
  #endif
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
index 1abc8a9064b3a5e8a80aa56d147e611204927b11,d52ee990707d3e8de898812b0cf56bee646c206b..5fd44c50bbac240a86182ae8a3a928dace3f67f4
@@@ -14,7 -14,7 +14,7 @@@
   * | Module Init and Probe        |       0x0193       | 0x0146         |
   * |                              |                    | 0x015b-0x0160        |
   * |                              |                    | 0x016e               |
-  * | Mailbox commands             |       0x1205       | 0x11a2-0x11ff        |
+  * | Mailbox commands             |       0x1206       | 0x11a2-0x11ff        |
   * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
   * |                            |                    | 0x211a         |
   * |                              |                    | 0x211c-0x2128  |
@@@ -60,7 -60,7 +60,7 @@@
   * |                              |                    | 0xb13c-0xb140  |
   * |                              |                    | 0xb149               |
   * | MultiQ                       |       0xc010       |              |
 - * | Misc                         |       0xd302       | 0xd031-0xd0ff        |
 + * | Misc                         |       0xd303       | 0xd031-0xd0ff        |
   * |                              |                    | 0xd101-0xd1fe        |
   * |                              |                    | 0xd214-0xd2fe        |
   * | Target Mode                |       0xe081       |                |
@@@ -717,7 -717,7 +717,7 @@@ qla2xxx_dump_post_process(scsi_qla_host
  
  /**
   * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
 - * @ha: HA context
 + * @vha: HA context
   * @hardware_locked: Called with the hardware_lock
   */
  void
@@@ -887,7 -887,7 +887,7 @@@ qla2300_fw_dump_failed
  
  /**
   * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 - * @ha: HA context
 + * @vha: HA context
   * @hardware_locked: Called with the hardware_lock
   */
  void
index 54625eb2904f3300935c1d2638c0f40cc9980d11,c9689f97c307ac34c0baeed35bfd60b13c2e96e2..eb2ec1fb07cbe7a6f2118b0504e2e3bde76374ae
  struct name_list_extended {
        struct get_name_list_extended *l;
        dma_addr_t              ldma;
-       struct list_head        fcports;        /* protect by sess_list */
+       struct list_head        fcports;
+       spinlock_t              fcports_lock;
        u32                     size;
-       u8                      sent;
  };
  /*
   * Timeout timer counts in seconds
@@@ -2356,8 -2356,6 +2356,8 @@@ typedef struct fc_port 
  #define NVME_PRLI_SP_DISCOVERY  BIT_3
        uint8_t nvme_flag;
  #define NVME_FLAG_REGISTERED 4
 +#define NVME_FLAG_DELETING 2
 +#define NVME_FLAG_RESETTING 1
  
        struct fc_port *conflict;
        unsigned char logout_completed;
@@@ -2983,14 -2981,8 +2983,14 @@@ enum scan_flags_t 
        SF_QUEUED = BIT_1,
  };
  
 +enum fc4type_t {
 +      FS_FC4TYPE_FCP  = BIT_0,
 +      FS_FC4TYPE_NVME = BIT_1,
 +};
 +
  struct fab_scan_rp {
        port_id_t id;
 +      enum fc4type_t fc4type;
        u8 port_name[8];
        u8 node_name[8];
  };
@@@ -3282,7 -3274,6 +3282,7 @@@ struct qla_work_evt 
                } nack;
                struct {
                        u8 fc4_type;
 +                      srb_t *sp;
                } gpnft;
         } u;
  };
@@@ -3472,6 -3463,7 +3472,6 @@@ struct qla_qpair 
        struct work_struct q_work;
        struct list_head qp_list_elem; /* vha->qp_list */
        struct list_head hints_list;
 -      struct list_head nvme_done_list;
        uint16_t cpuid;
        struct qla_tgt_counters tgt_counters;
  };
@@@ -4289,6 -4281,8 +4289,6 @@@ typedef struct scsi_qla_host 
        struct          nvme_fc_local_port *nvme_local_port;
        struct completion nvme_del_done;
        struct list_head nvme_rport_list;
 -      atomic_t        nvme_active_aen_cnt;
 -      uint16_t        nvme_last_rptd_aen;
  
        uint16_t        fcoe_vlan_id;
        uint16_t        fcoe_fcf_idx;
index 39dd62b8c6496640d5b808e09242b4da31db08d8,403fa096f8c807bbc1510cad0bc7ee1230dddb6c..2288757b5c9e6ba118b3e7360b7094fceea135eb
@@@ -21,10 -21,11 +21,10 @@@ static int qla_async_rsnn_nn(scsi_qla_h
  
  /**
   * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
 - * @ha: HA context
 - * @req_size: request size in bytes
 - * @rsp_size: response size in bytes
 + * @vha: HA context
 + * @arg: CT arguments
   *
 - * Returns a pointer to the @ha's ms_iocb.
 + * Returns a pointer to the @vha's ms_iocb.
   */
  void *
  qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
@@@ -60,8 -61,9 +60,8 @@@
  
  /**
   * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
 - * @ha: HA context
 - * @req_size: request size in bytes
 - * @rsp_size: response size in bytes
 + * @vha: HA context
 + * @arg: CT arguments
   *
   * Returns a pointer to the @ha's ms_iocb.
   */
@@@ -99,7 -101,7 +99,7 @@@ qla24xx_prep_ms_iocb(scsi_qla_host_t *v
  
  /**
   * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
 - * @ct_req: CT request buffer
 + * @p: CT request buffer
   * @cmd: GS command
   * @rsp_size: response size in bytes
   *
@@@ -194,7 -196,7 +194,7 @@@ qla2x00_chk_ms_status(scsi_qla_host_t *
  
  /**
   * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
 - * @ha: HA context
 + * @vha: HA context
   * @fcport: fcport entry to updated
   *
   * Returns 0 on success.
@@@ -281,7 -283,7 +281,7 @@@ qla2x00_gid_pt_rsp_size(scsi_qla_host_
  
  /**
   * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   * NOTE: Non-Nx_Ports are not requested.
@@@ -369,7 -371,7 +369,7 @@@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw
  
  /**
   * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   * Returns 0 on success.
@@@ -439,7 -441,7 +439,7 @@@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw
  
  /**
   * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   * Returns 0 on success.
@@@ -581,7 -583,7 +581,7 @@@ err2
  
  /**
   * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -673,8 -675,7 +673,8 @@@ done
  
  /**
   * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
 - * @ha: HA context
 + * @vha: HA context
 + * @type: not used
   *
   * Returns 0 on success.
   */
@@@ -768,7 -769,7 +768,7 @@@ done
  
  /**
   * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -873,7 -874,7 +873,7 @@@ qla2x00_get_sym_node_name(scsi_qla_host
  
  /**
   * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -969,7 -970,7 +969,7 @@@ done
  
  /**
   * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
 - * @ha: HA context
 + * @vha: HA context
   * @cmd: GS command
   * @scmd_len: Subcommand length
   * @data_size: response size in bytes
@@@ -1002,7 -1003,7 +1002,7 @@@ qla2x00_prep_sns_cmd(scsi_qla_host_t *v
  
  /**
   * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
 - * @ha: HA context
 + * @vha: HA context
   * @fcport: fcport entry to updated
   *
   * This command uses the old Exectute SNS Command mailbox routine.
@@@ -1066,7 -1067,7 +1066,7 @@@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha
  
  /**
   * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   * This command uses the old Exectute SNS Command mailbox routine.
@@@ -1139,7 -1140,7 +1139,7 @@@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha
  
  /**
   * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   * This command uses the old Exectute SNS Command mailbox routine.
@@@ -1195,7 -1196,7 +1195,7 @@@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha
  
  /**
   * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   * This command uses the old Exectute SNS Command mailbox routine.
@@@ -1258,7 -1259,7 +1258,7 @@@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha
  
  /**
   * qla2x00_snd_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
 - * @ha: HA context
 + * @vha: HA context
   *
   * This command uses the old Exectute SNS Command mailbox routine.
   *
@@@ -1307,7 -1308,8 +1307,7 @@@ qla2x00_sns_rft_id(scsi_qla_host_t *vha
  
  /**
   * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
 - * HBA.
 - * @ha: HA context
 + * @vha: HA context
   *
   * This command uses the old Exectute SNS Command mailbox routine.
   *
@@@ -1363,7 -1365,7 +1363,7 @@@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha
  
  /**
   * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -1399,7 -1401,7 +1399,7 @@@ qla2x00_mgmt_svr_login(scsi_qla_host_t 
  
  /**
   * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
 - * @ha: HA context
 + * @vha: HA context
   * @req_size: request size in bytes
   * @rsp_size: response size in bytes
   *
@@@ -1437,7 -1439,7 +1437,7 @@@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host
  
  /**
   * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
 - * @ha: HA context
 + * @vha: HA context
   * @req_size: request size in bytes
   * @rsp_size: response size in bytes
   *
@@@ -1494,7 -1496,7 +1494,7 @@@ qla2x00_update_ms_fdmi_iocb(scsi_qla_ho
  
  /**
   * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
 - * @ct_req: CT request buffer
 + * @p: CT request buffer
   * @cmd: GS command
   * @rsp_size: response size in bytes
   *
@@@ -1516,8 -1518,8 +1516,8 @@@ qla2x00_prep_ct_fdmi_req(struct ct_sns_
  }
  
  /**
 - * qla2x00_fdmi_rhba() -
 - * @ha: HA context
 + * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -1726,8 -1728,8 +1726,8 @@@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha
  }
  
  /**
 - * qla2x00_fdmi_rpa() -
 - * @ha: HA context
 + * qla2x00_fdmi_rpa() - perform RPA registration
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -1938,8 -1940,8 +1938,8 @@@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha
  }
  
  /**
 - * qla2x00_fdmiv2_rhba() -
 - * @ha: HA context
 + * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2255,7 -2257,7 +2255,7 @@@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vh
  
  /**
   * qla2x00_fdmi_dhba() -
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2303,7 -2305,7 +2303,7 @@@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha
  
  /**
   * qla2x00_fdmiv2_rpa() -
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2633,7 -2635,7 +2633,7 @@@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha
  
  /**
   * qla2x00_fdmi_register() -
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2691,7 -2693,7 +2691,7 @@@ out
  
  /**
   * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   * Returns 0 on success.
@@@ -2776,7 -2778,7 +2776,7 @@@ qla24xx_prep_ct_fm_req(struct ct_sns_pk
  
  /**
   * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   * Returns 0 on success.
@@@ -2890,7 -2892,7 +2890,7 @@@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_i
  /**
   * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
   *
 - * @ha: HA context
 + * @vha: HA context
   * @list: switch info entries to populate
   *
   */
@@@ -3177,6 -3179,7 +3177,7 @@@ done_free_sp
        sp->free(sp);
        fcport->flags &= ~FCF_ASYNC_SENT;
  done:
+       fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return rval;
  }
  
@@@ -3368,6 -3371,7 +3369,7 @@@ done_free_sp
        sp->free(sp);
        fcport->flags &= ~FCF_ASYNC_SENT;
  done:
+       fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return rval;
  }
  
@@@ -3858,6 -3862,7 +3860,6 @@@ void qla24xx_async_gnnft_done(scsi_qla_
        fc_port_t *fcport;
        u32 i, rc;
        bool found;
 -      u8 fc4type = sp->gen2;
        struct fab_scan_rp *rp;
        unsigned long flags;
  
                            "%s %d %8phC post new sess\n",
                            __func__, __LINE__, rp->port_name);
                        qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
 -                          rp->node_name, NULL, fc4type);
 +                          rp->node_name, NULL, rp->fc4type);
                }
        }
  
        spin_lock_irqsave(&vha->work_lock, flags);
        vha->scan.scan_flags &= ~SF_SCANNING;
        spin_unlock_irqrestore(&vha->work_lock, flags);
 -
 -      if ((fc4type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled)
 -              qla24xx_async_gpnft(vha, FC4_TYPE_NVME);
  }
  
 -static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
 +static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
 +      struct srb *sp)
  {
 -      struct srb *sp = s;
 -      struct scsi_qla_host *vha = sp->vha;
 -      struct qla_work_evt *e;
 +      struct qla_hw_data *ha = vha->hw;
 +      int num_fibre_dev = ha->max_fibre_devices;
        struct ct_sns_req *ct_req =
                (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
        struct ct_sns_gpnft_rsp *ct_rsp =
                (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
        struct ct_sns_gpn_ft_data *d;
        struct fab_scan_rp *rp;
 +      u16 cmd = be16_to_cpu(ct_req->command);
 +      u8 fc4_type = sp->gen2;
        int i, j, k;
 +      port_id_t id;
 +      u8 found;
 +      u64 wwn;
 +
 +      j = 0;
 +      for (i = 0; i < num_fibre_dev; i++) {
 +              d  = &ct_rsp->entries[i];
 +
 +              id.b.rsvd_1 = 0;
 +              id.b.domain = d->port_id[0];
 +              id.b.area   = d->port_id[1];
 +              id.b.al_pa  = d->port_id[2];
 +              wwn = wwn_to_u64(d->port_name);
 +
 +              if (id.b24 == 0 || wwn == 0)
 +                      continue;
 +
 +              if (fc4_type == FC4_TYPE_FCP_SCSI) {
 +                      if (cmd == GPN_FT_CMD) {
 +                              rp = &vha->scan.l[j];
 +                              rp->id = id;
 +                              memcpy(rp->port_name, d->port_name, 8);
 +                              j++;
 +                              rp->fc4type = FS_FC4TYPE_FCP;
 +                      } else {
 +                              for (k = 0; k < num_fibre_dev; k++) {
 +                                      rp = &vha->scan.l[k];
 +                                      if (id.b24 == rp->id.b24) {
 +                                              memcpy(rp->node_name,
 +                                                  d->port_name, 8);
 +                                              break;
 +                                      }
 +                              }
 +                      }
 +              } else {
 +                      /* Search if the fibre device supports FC4_TYPE_NVME */
 +                      if (cmd == GPN_FT_CMD) {
 +                              found = 0;
 +
 +                              for (k = 0; k < num_fibre_dev; k++) {
 +                                      rp = &vha->scan.l[k];
 +                                      if (!memcmp(rp->port_name,
 +                                          d->port_name, 8)) {
 +                                              /*
 +                                               * Supports FC-NVMe & FCP
 +                                               */
 +                                              rp->fc4type |= FS_FC4TYPE_NVME;
 +                                              found = 1;
 +                                              break;
 +                                      }
 +                              }
 +
 +                              /* We found new FC-NVMe only port */
 +                              if (!found) {
 +                                      for (k = 0; k < num_fibre_dev; k++) {
 +                                              rp = &vha->scan.l[k];
 +                                              if (wwn_to_u64(rp->port_name)) {
 +                                                      continue;
 +                                              } else {
 +                                                      rp->id = id;
 +                                                      memcpy(rp->port_name,
 +                                                          d->port_name, 8);
 +                                                      rp->fc4type =
 +                                                          FS_FC4TYPE_NVME;
 +                                                      break;
 +                                              }
 +                                      }
 +                              }
 +                      } else {
 +                              for (k = 0; k < num_fibre_dev; k++) {
 +                                      rp = &vha->scan.l[k];
 +                                      if (id.b24 == rp->id.b24) {
 +                                              memcpy(rp->node_name,
 +                                                  d->port_name, 8);
 +                                              break;
 +                                      }
 +                              }
 +                      }
 +              }
 +      }
 +}
 +
 +static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
 +{
 +      struct srb *sp = s;
 +      struct scsi_qla_host *vha = sp->vha;
 +      struct qla_work_evt *e;
 +      struct ct_sns_req *ct_req =
 +              (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
        u16 cmd = be16_to_cpu(ct_req->command);
 +      u8 fc4_type = sp->gen2;
 +      unsigned long flags;
  
        /* gen2 field is holding the fc4type */
        ql_dbg(ql_dbg_disc, vha, 0xffff,
                return;
        }
  
 -      if (!res) {
 -              port_id_t id;
 -              u64 wwn;
 -
 -              j = 0;
 -              for (i = 0; i < vha->hw->max_fibre_devices; i++) {
 -                      d  = &ct_rsp->entries[i];
 -
 -                      id.b.rsvd_1 = 0;
 -                      id.b.domain = d->port_id[0];
 -                      id.b.area   = d->port_id[1];
 -                      id.b.al_pa  = d->port_id[2];
 -                      wwn = wwn_to_u64(d->port_name);
 +      if (!res)
 +              qla2x00_find_free_fcp_nvme_slot(vha, sp);
  
 -                      if (id.b24 == 0 || wwn == 0)
 -                              continue;
 +      if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
 +          cmd == GNN_FT_CMD) {
 +              del_timer(&sp->u.iocb_cmd.timer);
 +              spin_lock_irqsave(&vha->work_lock, flags);
 +              vha->scan.scan_flags &= ~SF_SCANNING;
 +              spin_unlock_irqrestore(&vha->work_lock, flags);
  
 -                      if (cmd == GPN_FT_CMD) {
 -                              rp = &vha->scan.l[j];
 -                              rp->id = id;
 -                              memcpy(rp->port_name, d->port_name, 8);
 -                              j++;
 -                      } else {/* GNN_FT_CMD */
 -                              for (k = 0; k < vha->hw->max_fibre_devices;
 -                                  k++) {
 -                                      rp = &vha->scan.l[k];
 -                                      if (id.b24 == rp->id.b24) {
 -                                              memcpy(rp->node_name,
 -                                                  d->port_name, 8);
 -                                              break;
 -                                      }
 -                              }
 +              e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
 +              if (!e) {
 +                      /*
 +                       * please ignore kernel warning. Otherwise,
 +                       * we have mem leak.
 +                       */
 +                      if (sp->u.iocb_cmd.u.ctarg.req) {
 +                              dma_free_coherent(&vha->hw->pdev->dev,
 +                                  sizeof(struct ct_sns_pkt),
 +                                  sp->u.iocb_cmd.u.ctarg.req,
 +                                  sp->u.iocb_cmd.u.ctarg.req_dma);
 +                              sp->u.iocb_cmd.u.ctarg.req = NULL;
 +                      }
 +                      if (sp->u.iocb_cmd.u.ctarg.rsp) {
 +                              dma_free_coherent(&vha->hw->pdev->dev,
 +                                  sizeof(struct ct_sns_pkt),
 +                                  sp->u.iocb_cmd.u.ctarg.rsp,
 +                                  sp->u.iocb_cmd.u.ctarg.rsp_dma);
 +                              sp->u.iocb_cmd.u.ctarg.rsp = NULL;
                        }
 +
 +                      ql_dbg(ql_dbg_disc, vha, 0xffff,
 +                          "Async done-%s unable to alloc work element\n",
 +                          sp->name);
 +                      sp->free(sp);
 +                      set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 +                      set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 +                      return;
                }
 +              e->u.gpnft.fc4_type = FC4_TYPE_NVME;
 +              sp->rc = res;
 +              e->u.gpnft.sp = sp;
 +
 +              qla2x00_post_work(vha, e);
 +              return;
        }
  
        if (cmd == GPN_FT_CMD)
@@@ -4198,12 -4102,9 +4200,12 @@@ static int qla24xx_async_gnnft(scsi_qla
        int rval = QLA_FUNCTION_FAILED;
        struct ct_sns_req *ct_req;
        struct ct_sns_pkt *ct_sns;
 +      unsigned long flags;
  
        if (!vha->flags.online) {
 +              spin_lock_irqsave(&vha->work_lock, flags);
                vha->scan.scan_flags &= ~SF_SCANNING;
 +              spin_unlock_irqrestore(&vha->work_lock, flags);
                goto done_free_sp;
        }
  
                    "%s: req %p rsp %p are not setup\n",
                    __func__, sp->u.iocb_cmd.u.ctarg.req,
                    sp->u.iocb_cmd.u.ctarg.rsp);
 +              spin_lock_irqsave(&vha->work_lock, flags);
                vha->scan.scan_flags &= ~SF_SCANNING;
 +              spin_unlock_irqrestore(&vha->work_lock, flags);
                WARN_ON(1);
                goto done_free_sp;
        }
 +
 +      ql_dbg(ql_dbg_disc, vha, 0xfffff,
 +          "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
 +          __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
 +           sp->u.iocb_cmd.u.ctarg.req_size);
 +
        sp->type = SRB_CT_PTHRU_CMD;
        sp->name = "gnnft";
        sp->gen1 = vha->hw->base_qpair->chip_reset;
@@@ -4286,17 -4179,15 +4288,17 @@@ void qla24xx_async_gpnft_done(scsi_qla_
  }
  
  /* Get WWPN list for certain fc4_type */
 -int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type)
 +int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
  {
        int rval = QLA_FUNCTION_FAILED;
        struct ct_sns_req       *ct_req;
 -      srb_t *sp;
        struct ct_sns_pkt *ct_sns;
        u32 rspsz;
        unsigned long flags;
  
 +      ql_dbg(ql_dbg_disc, vha, 0xffff,
 +          "%s enter\n", __func__);
 +
        if (!vha->flags.online)
                return rval;
  
        vha->scan.scan_flags |= SF_SCANNING;
        spin_unlock_irqrestore(&vha->work_lock, flags);
  
 -      sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
 -      if (!sp) {
 -              vha->scan.scan_flags &= ~SF_SCANNING;
 +      if (fc4_type == FC4_TYPE_FCP_SCSI) {
 +              ql_dbg(ql_dbg_disc, vha, 0xffff,
 +                  "%s: Performing FCP Scan\n", __func__);
 +
 +              if (sp)
 +                      sp->free(sp); /* should not happen */
 +
 +              sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
 +              if (!sp) {
 +                      spin_lock_irqsave(&vha->work_lock, flags);
 +                      vha->scan.scan_flags &= ~SF_SCANNING;
 +                      spin_unlock_irqrestore(&vha->work_lock, flags);
 +                      return rval;
 +              }
 +
 +              sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
 +                      &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
 +                      &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
 +              if (!sp->u.iocb_cmd.u.ctarg.req) {
 +                      ql_log(ql_log_warn, vha, 0xffff,
 +                          "Failed to allocate ct_sns request.\n");
 +                      spin_lock_irqsave(&vha->work_lock, flags);
 +                      vha->scan.scan_flags &= ~SF_SCANNING;
 +                      spin_unlock_irqrestore(&vha->work_lock, flags);
 +                      goto done_free_sp;
 +              }
 +              sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
 +
 +              rspsz = sizeof(struct ct_sns_gpnft_rsp) +
 +                      ((vha->hw->max_fibre_devices - 1) *
 +                          sizeof(struct ct_sns_gpn_ft_data));
 +
 +              sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
 +                      &vha->hw->pdev->dev, rspsz,
 +                      &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
 +              if (!sp->u.iocb_cmd.u.ctarg.rsp) {
 +                      ql_log(ql_log_warn, vha, 0xffff,
 +                          "Failed to allocate ct_sns request.\n");
 +                      spin_lock_irqsave(&vha->work_lock, flags);
 +                      vha->scan.scan_flags &= ~SF_SCANNING;
 +                      spin_unlock_irqrestore(&vha->work_lock, flags);
 +                      goto done_free_sp;
 +              }
 +              sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
 +
 +              ql_dbg(ql_dbg_disc, vha, 0xffff,
 +                  "%s scan list size %d\n", __func__, vha->scan.size);
 +
 +              memset(vha->scan.l, 0, vha->scan.size);
 +      } else if (!sp) {
 +              ql_dbg(ql_dbg_disc, vha, 0xffff,
 +                  "NVME scan did not provide SP\n");
                return rval;
        }
  
        sp->gen2 = fc4_type;
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
  
 -      sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(&vha->hw->pdev->dev,
 -          sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
 -          GFP_KERNEL);
 -      if (!sp->u.iocb_cmd.u.ctarg.req) {
 -              ql_log(ql_log_warn, vha, 0xffff,
 -                  "Failed to allocate ct_sns request.\n");
 -              vha->scan.scan_flags &= ~SF_SCANNING;
 -              goto done_free_sp;
 -      }
 -
        rspsz = sizeof(struct ct_sns_gpnft_rsp) +
                ((vha->hw->max_fibre_devices - 1) *
                    sizeof(struct ct_sns_gpn_ft_data));
  
 -      sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(&vha->hw->pdev->dev,
 -          rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
 -      if (!sp->u.iocb_cmd.u.ctarg.rsp) {
 -              ql_log(ql_log_warn, vha, 0xffff,
 -                  "Failed to allocate ct_sns request.\n");
 -              vha->scan.scan_flags &= ~SF_SCANNING;
 -              goto done_free_sp;
 -      }
 -
 -      memset(vha->scan.l, 0, vha->scan.size);
 -
        ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
        /* CT_IU preamble  */
        ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
        /* GPN_FT req */
        ct_req->req.gpn_ft.port_type = fc4_type;
  
 -      sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
 -      sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
        sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
  
        sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
  
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
 +              spin_lock_irqsave(&vha->work_lock, flags);
                vha->scan.scan_flags &= ~SF_SCANNING;
 +              spin_unlock_irqrestore(&vha->work_lock, flags);
                goto done_free_sp;
        }
  
index 77c9177d0c25bd63ae0d28c2fe876bdca1706b60,8d7fab3cd01d28e393a17263b13dab44fe855f8a..8aeb0ed524a1b63b2618d4a2a39dcb01c2925924
@@@ -59,8 -59,6 +59,6 @@@ qla2x00_sp_timeout(struct timer_list *t
        req->outstanding_cmds[sp->handle] = NULL;
        iocb = &sp->u.iocb_cmd;
        iocb->timeout(sp);
-       if (sp->type != SRB_ELS_DCMD)
-               sp->free(sp);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
  }
  
@@@ -102,7 -100,6 +100,6 @@@ qla2x00_async_iocb_timeout(void *data
        srb_t *sp = data;
        fc_port_t *fcport = sp->fcport;
        struct srb_iocb *lio = &sp->u.iocb_cmd;
-       struct event_arg ea;
  
        if (fcport) {
                ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
  
        switch (sp->type) {
        case SRB_LOGIN_CMD:
-               if (!fcport)
-                       break;
                /* Retry as needed. */
                lio->u.logio.data[0] = MBS_COMMAND_ERROR;
                lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
                        QLA_LOGIO_LOGIN_RETRIED : 0;
-               memset(&ea, 0, sizeof(ea));
-               ea.event = FCME_PLOGI_DONE;
-               ea.fcport = sp->fcport;
-               ea.data[0] = lio->u.logio.data[0];
-               ea.data[1] = lio->u.logio.data[1];
-               ea.sp = sp;
-               qla24xx_handle_plogi_done_event(fcport->vha, &ea);
+               sp->done(sp, QLA_FUNCTION_TIMEOUT);
                break;
        case SRB_LOGOUT_CMD:
-               if (!fcport)
-                       break;
-               qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
-               break;
        case SRB_CT_PTHRU_CMD:
        case SRB_MB_IOCB:
        case SRB_NACK_PLOGI:
@@@ -228,6 -213,7 +213,7 @@@ done_free_sp
        sp->free(sp);
        fcport->flags &= ~FCF_ASYNC_SENT;
  done:
+       fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return rval;
  }
  
@@@ -235,12 -221,10 +221,10 @@@ static voi
  qla2x00_async_logout_sp_done(void *ptr, int res)
  {
        srb_t *sp = ptr;
-       struct srb_iocb *lio = &sp->u.iocb_cmd;
  
        sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-       if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
-               qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
-                   lio->u.logio.data);
+       sp->fcport->login_gen++;
+       qlt_logo_completion_handler(sp->fcport, res);
        sp->free(sp);
  }
  
@@@ -280,7 -264,7 +264,7 @@@ qla2x00_async_logout(struct scsi_qla_ho
  done_free_sp:
        sp->free(sp);
  done:
-       fcport->flags &= ~FCF_ASYNC_SENT;
+       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        return rval;
  }
  
@@@ -288,6 -272,7 +272,7 @@@ voi
  qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
      uint16_t *data)
  {
+       fcport->flags &= ~FCF_ASYNC_ACTIVE;
        /* Don't re-login in target mode */
        if (!fcport->tgt_session)
                qla2x00_mark_device_lost(vha, fcport, 1, 0);
@@@ -301,6 -286,7 +286,7 @@@ qla2x00_async_prlo_sp_done(void *s, in
        struct srb_iocb *lio = &sp->u.iocb_cmd;
        struct scsi_qla_host *vha = sp->vha;
  
+       sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
        if (!test_bit(UNLOADING, &vha->dpc_flags))
                qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
                    lio->u.logio.data);
@@@ -339,6 -325,7 +325,7 @@@ qla2x00_async_prlo(struct scsi_qla_hos
  done_free_sp:
        sp->free(sp);
  done:
+       fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return rval;
  }
  
@@@ -392,6 -379,8 +379,8 @@@ qla2x00_async_adisc_sp_done(void *ptr, 
            "Async done-%s res %x %8phC\n",
            sp->name, res, sp->fcport->port_name);
  
+       sp->fcport->flags &= ~FCF_ASYNC_SENT;
        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_ADISC_DONE;
        ea.rc = res;
@@@ -442,7 -431,7 +431,7 @@@ qla2x00_async_adisc(struct scsi_qla_hos
  done_free_sp:
        sp->free(sp);
  done:
-       fcport->flags &= ~FCF_ASYNC_SENT;
+       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        qla2x00_post_async_adisc_work(vha, fcport, data);
        return rval;
  }
@@@ -660,8 -649,7 +649,7 @@@ qla24xx_async_gnl_sp_done(void *s, int 
                    (loop_id & 0x7fff));
        }
  
-       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
-       vha->gnl.sent = 0;
+       spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
  
        INIT_LIST_HEAD(&h);
        fcport = tf = NULL;
  
        list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
                list_del_init(&fcport->gnl_entry);
+               spin_lock(&vha->hw->tgt.sess_lock);
                fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+               spin_unlock(&vha->hw->tgt.sess_lock);
                ea.fcport = fcport;
  
                qla2x00_fcport_event_handler(vha, &ea);
        }
+       spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
  
+       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        /* create new fcport if fw has knowledge of new sessions */
        for (i = 0; i < n; i++) {
                port_id_t id;
@@@ -727,18 -719,21 +719,21 @@@ int qla24xx_async_gnl(struct scsi_qla_h
        ql_dbg(ql_dbg_disc, vha, 0x20d9,
            "Async-gnlist WWPN %8phC \n", fcport->port_name);
  
-       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+       spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
+       if (!list_empty(&fcport->gnl_entry)) {
+               spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
+               rval = QLA_SUCCESS;
+               goto done;
+       }
+       spin_lock(&vha->hw->tgt.sess_lock);
        fcport->disc_state = DSC_GNL;
        fcport->last_rscn_gen = fcport->rscn_gen;
        fcport->last_login_gen = fcport->login_gen;
+       spin_unlock(&vha->hw->tgt.sess_lock);
  
        list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
-       if (vha->gnl.sent) {
-               spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-               return QLA_SUCCESS;
-       }
-       vha->gnl.sent = 1;
-       spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+       spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
  
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
@@@ -880,6 -875,7 +875,6 @@@ qla24xx_async_prli(struct scsi_qla_hos
                return rval;
  
        if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
 -          fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
            fcport->fw_login_state == DSC_LS_PRLI_PEND)
                return rval;
  
@@@ -1065,6 -1061,7 +1060,7 @@@ void qla24xx_handle_gpdb_event(scsi_qla
        fc_port_t *fcport = ea->fcport;
        struct port_database_24xx *pd;
        struct srb *sp = ea->sp;
+       uint8_t ls;
  
        pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
  
        if (fcport->disc_state == DSC_DELETE_PEND)
                return;
  
-       switch (pd->current_login_state) {
+       if (fcport->fc4f_nvme)
+               ls = pd->current_login_state >> 4;
+       else
+               ls = pd->current_login_state & 0xf;
+       switch (ls) {
        case PDS_PRLI_COMPLETE:
                __qla24xx_parse_gpdb(vha, fcport, pd);
                break;
@@@ -1167,8 -1169,9 +1168,9 @@@ int qla24xx_fcport_handle_login(struct 
        if (fcport->scan_state != QLA_FCPORT_FOUND)
                return 0;
  
-       if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-           (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+       if ((fcport->loop_id != FC_NO_LOOP_ID) &&
+           ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+            (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
                return 0;
  
        if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
                qla2x00_post_async_adisc_work(vha, fcport, data);
                break;
  
 +      case DSC_LOGIN_PEND:
 +              if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
 +                      qla24xx_post_prli_work(vha, fcport);
 +              break;
 +
        default:
                break;
        }
@@@ -1548,6 -1546,7 +1550,7 @@@ qla24xx_abort_sp_done(void *ptr, int re
        srb_t *sp = ptr;
        struct srb_iocb *abt = &sp->u.iocb_cmd;
  
+       del_timer(&sp->u.iocb_cmd.timer);
        complete(&abt->u.abt.comp);
  }
  
@@@ -1644,13 -1643,6 +1647,13 @@@ qla24xx_handle_prli_done_event(struct s
                qla24xx_post_gpdb_work(vha, ea->fcport, 0);
                break;
        default:
 +              if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
 +                  (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
 +                      set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 +                      ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
 +                      break;
 +              }
 +
                if (ea->fcport->n2n_flag) {
                        ql_dbg(ql_dbg_disc, vha, 0x2118,
                                "%s %d %8phC post fc4 prli\n",
@@@ -1727,7 -1719,6 +1730,6 @@@ qla24xx_handle_plogi_done_event(struct 
  
                        set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
                        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
-                       ea->fcport->loop_id = FC_NO_LOOP_ID;
                        ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
                        ea->fcport->logout_on_delete = 1;
                        ea->fcport->send_els_logo = 0;
@@@ -1819,6 -1810,7 +1821,7 @@@ qla2x00_async_logout_done(struct scsi_q
        qla2x00_mark_device_lost(vha, fcport, 1, 0);
        qlt_logo_completion_handler(fcport, data[0]);
        fcport->login_gen++;
+       fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return;
  }
  
@@@ -1826,6 -1818,7 +1829,7 @@@ voi
  qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
      uint16_t *data)
  {
+       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        if (data[0] == MBS_COMMAND_COMPLETE) {
                qla2x00_update_fcport(vha, fcport);
  
        }
  
        /* Retry login. */
-       fcport->flags &= ~FCF_ASYNC_SENT;
        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        else
@@@ -2057,7 -2049,7 +2060,7 @@@ qla2x00_initialize_adapter(scsi_qla_hos
  
  /**
   * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2088,7 -2080,7 +2091,7 @@@ qla2100_pci_config(scsi_qla_host_t *vha
  
  /**
   * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2170,7 -2162,7 +2173,7 @@@ qla2300_pci_config(scsi_qla_host_t *vha
  
  /**
   * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2214,7 -2206,7 +2217,7 @@@ qla24xx_pci_config(scsi_qla_host_t *vha
  
  /**
   * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2245,7 -2237,7 +2248,7 @@@ qla25xx_pci_config(scsi_qla_host_t *vha
  
  /**
   * qla2x00_isp_firmware() - Choose firmware image.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2281,7 -2273,7 +2284,7 @@@ qla2x00_isp_firmware(scsi_qla_host_t *v
  
  /**
   * qla2x00_reset_chip() - Reset ISP chip.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2425,7 -2417,6 +2428,7 @@@ qla2x00_reset_chip(scsi_qla_host_t *vha
  
  /**
   * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2442,7 -2433,7 +2445,7 @@@ qla81xx_reset_mpi(scsi_qla_host_t *vha
  
  /**
   * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2657,7 -2648,7 +2660,7 @@@ acquired
  
  /**
   * qla24xx_reset_chip() - Reset ISP24xx chip.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2681,7 -2672,7 +2684,7 @@@ qla24xx_reset_chip(scsi_qla_host_t *vha
  
  /**
   * qla2x00_chip_diag() - Test chip for proper operation.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -2700,8 -2691,8 +2703,8 @@@ qla2x00_chip_diag(scsi_qla_host_t *vha
        /* Assume a failed state */
        rval = QLA_FUNCTION_FAILED;
  
 -      ql_dbg(ql_dbg_init, vha, 0x007b,
 -          "Testing device at %lx.\n", (u_long)&reg->flash_address);
 +      ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
 +             &reg->flash_address);
  
        spin_lock_irqsave(&ha->hardware_lock, flags);
  
@@@ -2805,7 -2796,7 +2808,7 @@@ chip_diag_failed
  
  /**
   * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -3273,7 -3264,7 +3276,7 @@@ out
  
  /**
   * qla2x00_setup_chip() - Load and start RISC firmware.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -3428,7 -3419,7 +3431,7 @@@ failed
  
  /**
   * qla2x00_init_response_q_entries() - Initializes response queue entries.
 - * @ha: HA context
 + * @rsp: response queue
   *
   * Beginning of request ring has initialization control block already built
   * by nvram config routine.
@@@ -3453,7 -3444,7 +3456,7 @@@ qla2x00_init_response_q_entries(struct 
  
  /**
   * qla2x00_update_fw_options() - Read and process firmware options.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -3716,7 -3707,7 +3719,7 @@@ qla24xx_config_rings(struct scsi_qla_ho
  
  /**
   * qla2x00_init_rings() - Initializes firmware.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Beginning of request ring has initialization control block already built
   * by nvram config routine.
@@@ -3824,7 -3815,7 +3827,7 @@@ next_check
  
  /**
   * qla2x00_fw_ready() - Waits for firmware ready.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns 0 on success.
   */
@@@ -4492,7 -4483,7 +4495,7 @@@ qla2x00_rport_del(void *data
  
  /**
   * qla2x00_alloc_fcport() - Allocate a generic fcport.
 - * @ha: HA context
 + * @vha: HA context
   * @flags: allocation flags
   *
   * Returns a pointer to the allocated fcport, or NULL, if none available.
@@@ -5036,9 -5027,9 +5039,9 @@@ qla2x00_iidma_fcport(scsi_qla_host_t *v
                    fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
        } else {
                ql_dbg(ql_dbg_disc, vha, 0x2005,
 -                  "iIDMA adjusted to %s GB/s on %8phN.\n",
 +                  "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
                    qla2x00_get_link_speed_str(ha, fcport->fp_speed),
 -                  fcport->port_name);
 +                  fcport->fp_speed, fcport->port_name);
        }
  }
  
@@@ -5118,14 -5109,13 +5121,14 @@@ qla2x00_update_fcport(scsi_qla_host_t *
        fcport->deleted = 0;
        fcport->logout_on_delete = 1;
  
 +      qla2x00_set_fcport_state(fcport, FCS_ONLINE);
 +      qla2x00_iidma_fcport(vha, fcport);
 +
        if (fcport->fc4f_nvme) {
                qla_nvme_register_remote(vha, fcport);
                return;
        }
  
 -      qla2x00_set_fcport_state(fcport, FCS_ONLINE);
 -      qla2x00_iidma_fcport(vha, fcport);
        qla24xx_update_fcport_fcp_prio(vha, fcport);
  
  reg_port:
@@@ -5264,8 -5254,8 +5267,8 @@@ qla2x00_configure_fabric(scsi_qla_host_
                qlt_do_generation_tick(vha, &discovery_gen);
  
                if (USE_ASYNC_SCAN(ha)) {
 -                      rval = QLA_SUCCESS;
 -                      rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
 +                      rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
 +                          NULL);
                        if (rval)
                                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                } else  {
@@@ -5528,14 -5518,6 +5531,14 @@@ qla2x00_find_all_fabric_devs(scsi_qla_h
                        break;
                }
  
 +              if (fcport->fc4f_nvme) {
 +                      if (fcport->disc_state == DSC_DELETE_PEND) {
 +                              fcport->disc_state = DSC_GNL;
 +                              vha->fcport_count--;
 +                              fcport->login_succ = 0;
 +                      }
 +              }
 +
                if (found) {
                        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
                        continue;
@@@ -8416,6 -8398,7 +8419,6 @@@ struct qla_qpair *qla2xxx_create_qpair(
                qpair->vp_idx = vp_idx;
                qpair->fw_started = ha->flags.fw_started;
                INIT_LIST_HEAD(&qpair->hints_list);
 -              INIT_LIST_HEAD(&qpair->nvme_done_list);
                qpair->chip_reset = ha->base_qpair->chip_reset;
                qpair->enable_class_2 = ha->base_qpair->enable_class_2;
                qpair->enable_explicit_conf =
index a4edbecfaf968f8ad56523d718539c7ca9494992,8d00d559bd2659b13bb2362034dffd855a370d44..f74ff7b550b64348d511c930d474bffd991a9081
@@@ -14,7 -14,7 +14,7 @@@
  
  /**
   * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 - * @cmd: SCSI command
 + * @sp: SCSI command
   *
   * Returns the proper CF_* direction based on CDB.
   */
@@@ -86,7 -86,7 +86,7 @@@ qla2x00_calc_iocbs_64(uint16_t dsds
  
  /**
   * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 - * @ha: HA context
 + * @vha: HA context
   *
   * Returns a pointer to the Continuation Type 0 IOCB packet.
   */
@@@ -114,8 -114,7 +114,8 @@@ qla2x00_prep_cont_type0_iocb(struct scs
  
  /**
   * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 - * @ha: HA context
 + * @vha: HA context
 + * @req: request queue
   *
   * Returns a pointer to the continuation type 1 IOCB packet.
   */
@@@ -446,8 -445,6 +446,8 @@@ queuing_error
  
  /**
   * qla2x00_start_iocbs() - Execute the IOCB command
 + * @vha: HA context
 + * @req: request queue
   */
  void
  qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
  
  /**
   * qla2x00_marker() - Send a marker IOCB to the firmware.
 - * @ha: HA context
 + * @vha: HA context
 + * @req: request queue
 + * @rsp: response queue
   * @loop_id: loop ID
   * @lun: LUN
   * @type: marker modifier
@@@ -1195,8 -1190,6 +1195,8 @@@ qla24xx_walk_and_build_prot_sglist(stru
   * @sp: SRB command to process
   * @cmd_pkt: Command type 3 IOCB
   * @tot_dsds: Total number of segments to transfer
 + * @tot_prot_dsds:
 + * @fw_prot_opts:
   */
  inline int
  qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
 -      uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
  
        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
 -          dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
 +          dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
  
        if (!crc_ctx_pkt)
                goto crc_queuing_error;
  
 -      /* Zero out CTX area. */
 -      clr_ptr = (uint8_t *)crc_ctx_pkt;
 -      memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
 -
        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
  
        sp->flags |= SRB_CRC_CTX_DMA_VALID;
@@@ -3069,7 -3067,7 +3069,7 @@@ sufficient_dsds
                }
  
                memset(ctx, 0, sizeof(struct ct6_dsd));
 -              ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
 +              ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
                        GFP_ATOMIC, &ctx->fcp_cmnd_dma);
                if (!ctx->fcp_cmnd) {
                        ql_log(ql_log_fatal, vha, 0x3011,
                host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
  
                /* build FCP_CMND IU */
 -              memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
                int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
                ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
  
@@@ -3276,12 -3275,11 +3276,11 @@@ qla24xx_abort_iocb(srb_t *sp, struct ab
        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
        abt_iocb->entry_type = ABORT_IOCB_TYPE;
        abt_iocb->entry_count = 1;
-       abt_iocb->handle =
-            cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
-                aio->u.abt.cmd_hndl));
+       abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
        abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        abt_iocb->handle_to_abort =
-           cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+           cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+                                   aio->u.abt.cmd_hndl));
        abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
        abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
index 5fbb8f4b4dc7e80ee64807f4ba547c51ca7fc8c8,89f93ebd819d7590a5ece0dd689f6bd350be7393..7cacdc3408fa02712e6c58085a590441f03b9963
@@@ -259,7 -259,7 +259,7 @@@ qla2300_intr_handler(int irq, void *dev
  
  /**
   * qla2x00_mbx_completion() - Process mailbox command completions.
 - * @ha: SCSI driver HA context
 + * @vha: SCSI driver HA context
   * @mb0: Mailbox0 register
   */
  static void
@@@ -272,7 -272,8 +272,8 @@@ qla2x00_mbx_completion(scsi_qla_host_t 
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
  
        /* Read all mbox registers? */
-       mboxes = (1 << ha->mbx_count) - 1;
+       WARN_ON_ONCE(ha->mbx_count > 32);
+       mboxes = (1ULL << ha->mbx_count) - 1;
        if (!ha->mcp)
                ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
        else
@@@ -612,8 -613,7 +613,8 @@@ qla2x00_find_fcport_by_nportid(scsi_qla
  
  /**
   * qla2x00_async_event() - Process aynchronous events.
 - * @ha: SCSI driver HA context
 + * @vha: SCSI driver HA context
 + * @rsp: response queue
   * @mb: Mailbox registers (0 - 3)
   */
  void
@@@ -767,6 -767,7 +768,6 @@@ skip_rio
  
        case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
                ha->flags.lip_ae = 1;
 -              ha->flags.n2n_ae = 0;
  
                ql_dbg(ql_dbg_async, vha, 0x5009,
                    "LIP occurred (%x).\n", mb[1]);
  
        case MBA_LOOP_DOWN:             /* Loop Down Event */
                SAVE_TOPO(ha);
 -              ha->flags.n2n_ae = 0;
                ha->flags.lip_ae = 0;
                ha->current_topology = 0;
  
        /* case MBA_DCBX_COMPLETE: */
        case MBA_POINT_TO_POINT:        /* Point-to-Point */
                ha->flags.lip_ae = 0;
 -              ha->flags.n2n_ae = 1;
  
                if (IS_QLA2100(ha))
                        break;
@@@ -1253,8 -1256,7 +1254,8 @@@ global_port_update
  
  /**
   * qla2x00_process_completed_request() - Process a Fast Post response.
 - * @ha: SCSI driver HA context
 + * @vha: SCSI driver HA context
 + * @req: request queue
   * @index: SRB index
   */
  void
@@@ -1837,23 -1839,31 +1838,23 @@@ qla24xx_tm_iocb_entry(scsi_qla_host_t *
        sp->done(sp, 0);
  }
  
 -static void
 -qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 +static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 +    void *tsk, srb_t *sp)
  {
 -      const char func[] = "NVME-IOCB";
        fc_port_t *fcport;
 -      srb_t *sp;
        struct srb_iocb *iocb;
        struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
        uint16_t        state_flags;
        struct nvmefc_fcp_req *fd;
        uint16_t        ret = 0;
 -      struct srb_iocb *nvme;
 -
 -      sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
 -      if (!sp)
 -              return;
  
        iocb = &sp->u.iocb_cmd;
        fcport = sp->fcport;
        iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
        state_flags  = le16_to_cpu(sts->state_flags);
        fd = iocb->u.nvme.desc;
 -      nvme = &sp->u.iocb_cmd;
  
 -      if (unlikely(nvme->u.nvme.aen_op))
 +      if (unlikely(iocb->u.nvme.aen_op))
                atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
  
        /*
        fd->transferred_length = fd->payload_length -
            le32_to_cpu(sts->residual_len);
  
 -      /*
 -       * If transport error then Failure (HBA rejects request)
 -       * otherwise transport will handle.
 -       */
 -      if (sts->entry_status) {
 -              ql_log(ql_log_warn, fcport->vha, 0x5038,
 -                  "NVME-%s error - hdl=%x entry-status(%x).\n",
 -                  sp->name, sp->handle, sts->entry_status);
 +      switch (le16_to_cpu(sts->comp_status)) {
 +      case CS_COMPLETE:
 +              ret = QLA_SUCCESS;
 +              break;
 +      case CS_ABORTED:
 +      case CS_RESET:
 +      case CS_PORT_UNAVAILABLE:
 +      case CS_PORT_LOGGED_OUT:
 +      case CS_PORT_BUSY:
 +              ql_log(ql_log_warn, fcport->vha, 0x5060,
 +                  "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
 +                  sp->name, sp->handle, sts->comp_status,
 +                  le32_to_cpu(sts->residual_len), sts->ox_id);
 +              fd->transferred_length = 0;
 +              iocb->u.nvme.rsp_pyld_len = 0;
 +              ret = QLA_ABORTED;
 +              break;
 +      default:
 +              ql_log(ql_log_warn, fcport->vha, 0x5060,
 +                  "NVME-%s error - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
 +                  sp->name, sp->handle, sts->comp_status,
 +                  le32_to_cpu(sts->residual_len), sts->ox_id);
                ret = QLA_FUNCTION_FAILED;
 -      } else  {
 -              switch (le16_to_cpu(sts->comp_status)) {
 -                      case CS_COMPLETE:
 -                              ret = 0;
 -                      break;
 -
 -                      case CS_ABORTED:
 -                      case CS_RESET:
 -                      case CS_PORT_UNAVAILABLE:
 -                      case CS_PORT_LOGGED_OUT:
 -                      case CS_PORT_BUSY:
 -                              ql_log(ql_log_warn, fcport->vha, 0x5060,
 -                              "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
 -                              sp->name, sp->handle, sts->comp_status,
 -                              le32_to_cpu(sts->residual_len), sts->ox_id);
 -                              fd->transferred_length = fd->payload_length;
 -                              ret = QLA_ABORTED;
 -                      break;
 -
 -                      default:
 -                              ql_log(ql_log_warn, fcport->vha, 0x5060,
 -                              "NVME-%s error - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
 -                              sp->name, sp->handle, sts->comp_status,
 -                              le32_to_cpu(sts->residual_len), sts->ox_id);
 -                              ret = QLA_FUNCTION_FAILED;
 -                              break;
 -              }
 +              break;
        }
        sp->done(sp, ret);
  }
@@@ -1948,7 -1970,7 +1949,7 @@@ static void qla_ctrlvp_completed(scsi_q
  
  /**
   * qla2x00_process_response_queue() - Process response queue entries.
 - * @ha: SCSI driver HA context
 + * @rsp: response queue
   */
  void
  qla2x00_process_response_queue(struct rsp_que *rsp)
@@@ -2352,8 -2374,7 +2353,8 @@@ done
  
  /**
   * qla2x00_status_entry() - Process a Status IOCB entry.
 - * @ha: SCSI driver HA context
 + * @vha: SCSI driver HA context
 + * @rsp: response queue
   * @pkt: Entry pointer
   */
  static void
@@@ -2438,8 -2459,7 +2439,8 @@@ qla2x00_status_entry(scsi_qla_host_t *v
  
        /* NVME completion. */
        if (sp->type == SRB_NVME_CMD) {
 -              qla24xx_nvme_iocb_entry(vha, req, pkt);
 +              req->outstanding_cmds[handle] = NULL;
 +              qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
                return;
        }
  
@@@ -2731,7 -2751,7 +2732,7 @@@ out
  
  /**
   * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 - * @ha: SCSI driver HA context
 + * @rsp: response queue
   * @pkt: Entry pointer
   *
   * Extended sense data.
@@@ -2789,8 -2809,7 +2790,8 @@@ qla2x00_status_cont_entry(struct rsp_qu
  
  /**
   * qla2x00_error_entry() - Process an error entry.
 - * @ha: SCSI driver HA context
 + * @vha: SCSI driver HA context
 + * @rsp: response queue
   * @pkt: Entry pointer
   * return : 1=allow further error analysis. 0=no additional error analysis.
   */
@@@ -2849,7 -2868,7 +2850,7 @@@ fatal
  
  /**
   * qla24xx_mbx_completion() - Process mailbox command completions.
 - * @ha: SCSI driver HA context
 + * @vha: SCSI driver HA context
   * @mb0: Mailbox0 register
   */
  static void
@@@ -2862,7 -2881,8 +2863,8 @@@ qla24xx_mbx_completion(scsi_qla_host_t 
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
  
        /* Read all mbox registers? */
-       mboxes = (1 << ha->mbx_count) - 1;
+       WARN_ON_ONCE(ha->mbx_count > 32);
+       mboxes = (1ULL << ha->mbx_count) - 1;
        if (!ha->mcp)
                ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
        else
@@@ -2917,8 -2937,7 +2919,8 @@@ void qla24xx_nvme_ls4_iocb(struct scsi_
  
  /**
   * qla24xx_process_response_queue() - Process response queue entries.
 - * @ha: SCSI driver HA context
 + * @vha: SCSI driver HA context
 + * @rsp: response queue
   */
  void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        struct rsp_que *rsp)
index c9a134ae0d2b127adebd460438af54f1ab732922,9a97f2ceffbab1e7a55402719efda19be7b264c7..5db0262d5c94690c46f1fc2d11bdbffc780f08b4
@@@ -503,11 -503,19 +503,19 @@@ mbx_done
                                }
                        pr_warn(" cmd=%x ****\n", command);
                }
-               ql_dbg(ql_dbg_mbx, vha, 0x1198,
-                   "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
-                   RD_REG_DWORD(&reg->isp24.host_status),
-                   RD_REG_DWORD(&reg->isp24.ictrl),
-                   RD_REG_DWORD(&reg->isp24.istatus));
+               if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
+                       ql_dbg(ql_dbg_mbx, vha, 0x1198,
+                           "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
+                           RD_REG_DWORD(&reg->isp24.host_status),
+                           RD_REG_DWORD(&reg->isp24.ictrl),
+                           RD_REG_DWORD(&reg->isp24.istatus));
+               } else {
+                       ql_dbg(ql_dbg_mbx, vha, 0x1206,
+                           "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
+                           RD_REG_WORD(&reg->isp.ctrl_status),
+                           RD_REG_WORD(&reg->isp.ictrl),
+                           RD_REG_WORD(&reg->isp.istatus));
+               }
        } else {
                ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
        }
@@@ -1025,12 -1033,9 +1033,12 @@@ qla2x00_get_fw_version(scsi_qla_host_t 
                 * FW supports nvme and driver load parameter requested nvme.
                 * BIT 26 of fw_attributes indicates NVMe support.
                 */
 -              if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
 +              if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
                        vha->flags.nvme_enabled = 1;
 -
 +                      ql_log(ql_log_info, vha, 0xd302,
 +                          "%s: FC-NVMe is Enabled (0x%x)\n",
 +                           __func__, ha->fw_attributes_h);
 +              }
        }
  
        if (IS_QLA27XX(ha)) {
@@@ -3388,10 -3393,7 +3396,10 @@@ qla8044_read_serdes_word(scsi_qla_host_
  
  /**
   * qla2x00_set_serdes_params() -
 - * @ha: HA context
 + * @vha: HA context
 + * @sw_em_1g:
 + * @sw_em_2g:
 + * @sw_em_4g:
   *
   * Returns
   */
@@@ -3750,7 -3752,6 +3758,7 @@@ qla24xx_report_id_acquisition(scsi_qla_
        id.b.area   = rptid_entry->port_id[1];
        id.b.al_pa  = rptid_entry->port_id[0];
        id.b.rsvd_1 = 0;
 +      ha->flags.n2n_ae = 0;
  
        if (rptid_entry->format == 0) {
                /* loop */
                        set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
                        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
                        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
 +                      ha->flags.n2n_ae = 1;
                        return;
                }
  
                vha->d_id.b.area = rptid_entry->port_id[1];
                vha->d_id.b.al_pa = rptid_entry->port_id[0];
  
 +              ha->flags.n2n_ae = 1;
                spin_lock_irqsave(&ha->vport_slock, flags);
                qlt_update_vp_map(vha, SET_AL_PA);
                spin_unlock_irqrestore(&ha->vport_slock, flags);
index 6fa2467e2a16983103a93d57caeffbe0fa64c011,b12fea6367b594b6c8bd9b80c636967218914f06..fb35d9e9491259645af380678e438a5ed37483bd
@@@ -397,6 -397,7 +397,6 @@@ static void qla_init_base_qpair(struct 
        ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
        ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
        INIT_LIST_HEAD(&ha->base_qpair->hints_list);
 -      INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
        ha->base_qpair->enable_class_2 = ql2xenableclass2;
        /* init qpair to this cpu. Will adjust at run time. */
        qla_cpu_update(rsp->qpair, raw_smp_processor_id());
@@@ -453,7 -454,7 +453,7 @@@ static int qla2x00_alloc_queues(struct 
        ha->req_q_map[0] = req;
        set_bit(0, ha->rsp_qid_map);
        set_bit(0, ha->req_qid_map);
-       return 1;
+       return 0;
  
  fail_qpair_map:
        kfree(ha->base_qpair);
@@@ -470,6 -471,9 +470,9 @@@ fail_req_map
  
  static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
  {
+       if (!ha->req_q_map)
+               return;
        if (IS_QLAFX00(ha)) {
                if (req && req->ring_fx00)
                        dma_free_coherent(&ha->pdev->dev,
                (req->length + 1) * sizeof(request_t),
                req->ring, req->dma);
  
-       if (req)
+       if (req) {
                kfree(req->outstanding_cmds);
-       kfree(req);
+               kfree(req);
+       }
  }
  
  static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
  {
+       if (!ha->rsp_q_map)
+               return;
        if (IS_QLAFX00(ha)) {
-               if (rsp && rsp->ring)
+               if (rsp && rsp->ring_fx00)
                        dma_free_coherent(&ha->pdev->dev,
                            (rsp->length_fx00 + 1) * sizeof(request_t),
                            rsp->ring_fx00, rsp->dma_fx00);
                (rsp->length + 1) * sizeof(response_t),
                rsp->ring, rsp->dma);
        }
-       kfree(rsp);
+       if (rsp)
+               kfree(rsp);
  }
  
  static void qla2x00_free_queues(struct qla_hw_data *ha)
@@@ -1722,6 -1730,8 +1729,8 @@@ __qla2x00_abort_all_cmds(struct qla_qpa
        struct qla_tgt_cmd *cmd;
        uint8_t trace = 0;
  
+       if (!ha->req_q_map)
+               return;
        spin_lock_irqsave(qp->qp_lock_ptr, flags);
        req = qp->req;
        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                                        sp_get(sp);
                                        spin_unlock_irqrestore(qp->qp_lock_ptr,
                                            flags);
 -                                      qla_nvme_abort(ha, sp);
 +                                      qla_nvme_abort(ha, sp, res);
                                        spin_lock_irqsave(qp->qp_lock_ptr,
                                            flags);
                                } else if (GET_CMD_SP(sp) &&
@@@ -3094,14 -3104,14 +3103,14 @@@ qla2x00_probe_one(struct pci_dev *pdev
        /* Set up the irqs */
        ret = qla2x00_request_irqs(ha, rsp);
        if (ret)
-               goto probe_hw_failed;
+               goto probe_failed;
  
        /* Alloc arrays of request and response ring ptrs */
-       if (!qla2x00_alloc_queues(ha, req, rsp)) {
+       if (qla2x00_alloc_queues(ha, req, rsp)) {
                ql_log(ql_log_fatal, base_vha, 0x003d,
                    "Failed to allocate memory for queue pointers..."
                    "aborting.\n");
-               goto probe_init_failed;
+               goto probe_failed;
        }
  
        if (ha->mqenable && shost_use_blk_mq(host)) {
@@@ -3386,15 -3396,6 +3395,6 @@@ skip_dpc
  
        return 0;
  
- probe_init_failed:
-       qla2x00_free_req_que(ha, req);
-       ha->req_q_map[0] = NULL;
-       clear_bit(0, ha->req_qid_map);
-       qla2x00_free_rsp_que(ha, rsp);
-       ha->rsp_q_map[0] = NULL;
-       clear_bit(0, ha->rsp_qid_map);
-       ha->max_req_queues = ha->max_rsp_queues = 0;
  probe_failed:
        if (base_vha->timer_active)
                qla2x00_stop_timer(base_vha);
@@@ -3624,6 -3625,8 +3624,8 @@@ qla2x00_remove_one(struct pci_dev *pdev
        }
        qla2x00_wait_for_hba_ready(base_vha);
  
+       qla2x00_wait_for_sess_deletion(base_vha);
        /*
         * if UNLOAD flag is already set, then continue unload,
         * where it was set first.
@@@ -4505,11 -4508,17 +4507,17 @@@ qla2x00_mem_free(struct qla_hw_data *ha
        if (ha->init_cb)
                dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
                        ha->init_cb, ha->init_cb_dma);
-       vfree(ha->optrom_buffer);
-       kfree(ha->nvram);
-       kfree(ha->npiv_info);
-       kfree(ha->swl);
-       kfree(ha->loop_id_map);
+       if (ha->optrom_buffer)
+               vfree(ha->optrom_buffer);
+       if (ha->nvram)
+               kfree(ha->nvram);
+       if (ha->npiv_info)
+               kfree(ha->npiv_info);
+       if (ha->swl)
+               kfree(ha->swl);
+       if (ha->loop_id_map)
+               kfree(ha->loop_id_map);
  
        ha->srb_mempool = NULL;
        ha->ctx_mempool = NULL;
        ha->ex_init_cb_dma = 0;
        ha->async_pd = NULL;
        ha->async_pd_dma = 0;
+       ha->loop_id_map = NULL;
+       ha->npiv_info = NULL;
+       ha->optrom_buffer = NULL;
+       ha->swl = NULL;
+       ha->nvram = NULL;
+       ha->mctp_dump = NULL;
+       ha->dcbx_tlv = NULL;
+       ha->xgmac_data = NULL;
+       ha->sfp_data = NULL;
  
        ha->s_dma_pool = NULL;
        ha->dl_dma_pool = NULL;
@@@ -4574,6 -4592,7 +4591,7 @@@ struct scsi_qla_host *qla2x00_create_ho
  
        spin_lock_init(&vha->work_lock);
        spin_lock_init(&vha->cmd_list_lock);
+       spin_lock_init(&vha->gnl.fcports_lock);
        init_waitqueue_head(&vha->fcport_waitQ);
        init_waitqueue_head(&vha->vref_waitq);
  
@@@ -4803,14 -4822,12 +4821,14 @@@ void qla24xx_create_new_sess(struct scs
                        fcport->d_id = e->u.new_sess.id;
                        fcport->flags |= FCF_FABRIC_DEVICE;
                        fcport->fw_login_state = DSC_LS_PLOGI_PEND;
 -                      if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) {
 +                      if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
                                fcport->fc4_type = FC4_TYPE_FCP_SCSI;
 -                      } else if (e->u.new_sess.fc4_type == FC4_TYPE_NVME) {
 +
 +                      if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
                                fcport->fc4_type = FC4_TYPE_OTHER;
                                fcport->fc4f_nvme = FC4_TYPE_NVME;
                        }
 +
                        memcpy(fcport->port_name, e->u.new_sess.port_name,
                            WWN_SIZE);
                } else {
                        }
                        qlt_plogi_ack_unref(vha, pla);
                } else {
+                       fc_port_t *dfcp = NULL;
                        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
                        tfcp = qla2x00_find_fcport_by_nportid(vha,
                            &e->u.new_sess.id, 1);
                                default:
                                        fcport->login_pause = 1;
                                        tfcp->conflict = fcport;
-                                       qlt_schedule_sess_for_deletion(tfcp);
+                                       dfcp = tfcp;
                                        break;
                                }
                        }
                        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+                       if (dfcp)
+                               qlt_schedule_sess_for_deletion(tfcp);
  
                        wwn = wwn_to_u64(fcport->node_name);
  
@@@ -5026,8 -5047,7 +5048,8 @@@ qla2x00_do_work(struct scsi_qla_host *v
                            e->u.logio.data);
                        break;
                case QLA_EVT_GPNFT:
 -                      qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type);
 +                      qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
 +                          e->u.gpnft.sp);
                        break;
                case QLA_EVT_GPNFT_DONE:
                        qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
index ead6813ea9b3ce38e03eb0c23bf2794562a6642c,b49ac85f3de2254694e728f75d4cf2fa7e260662..5546ac9c3d9dd6028a45742740a4fa337420426c
@@@ -961,7 -961,7 +961,7 @@@ qlt_send_first_logo(struct scsi_qla_hos
            logo->cmd_count, res);
  }
  
 -static void qlt_free_session_done(struct work_struct *work)
 +void qlt_free_session_done(struct work_struct *work)
  {
        struct fc_port *sess = container_of(work, struct fc_port,
            free_work);
@@@ -1169,14 -1169,11 +1169,14 @@@ void qlt_unreg_sess(struct fc_port *ses
        sess->last_rscn_gen = sess->rscn_gen;
        sess->last_login_gen = sess->login_gen;
  
 -      if (sess->nvme_flag & NVME_FLAG_REGISTERED)
 +      if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
 +          !(sess->nvme_flag & NVME_FLAG_DELETING)) {
 +              sess->nvme_flag |= NVME_FLAG_DELETING;
                schedule_work(&sess->nvme_del_work);
 -
 -      INIT_WORK(&sess->free_work, qlt_free_session_done);
 -      schedule_work(&sess->free_work);
 +      } else {
 +              INIT_WORK(&sess->free_work, qlt_free_session_done);
 +              schedule_work(&sess->free_work);
 +      }
  }
  EXPORT_SYMBOL(qlt_unreg_sess);
  
@@@ -1227,10 -1224,10 +1227,10 @@@ static void qla24xx_chk_fcp_state(struc
        }
  }
  
- /* ha->tgt.sess_lock supposed to be held on entry */
  void qlt_schedule_sess_for_deletion(struct fc_port *sess)
  {
        struct qla_tgt *tgt = sess->tgt;
+       struct qla_hw_data *ha = sess->vha->hw;
        unsigned long flags;
  
        if (sess->disc_state == DSC_DELETE_PEND)
                        return;
        }
  
+       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (sess->deleted == QLA_SESS_DELETED)
                sess->logout_on_delete = 0;
  
-       spin_lock_irqsave(&sess->vha->work_lock, flags);
        if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
-               spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+               spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
                return;
        }
        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-       spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
  
        sess->disc_state = DSC_DELETE_PEND;
  
        ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
            "Scheduling sess %p for deletion\n", sess);
  
-       /* use cancel to push work element through before re-queue */
-       cancel_work_sync(&sess->del_work);
        INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
-       queue_work(sess->vha->hw->wq, &sess->del_work);
+       WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
  }
  
- /* ha->tgt.sess_lock supposed to be held on entry */
  static void qlt_clear_tgt_db(struct qla_tgt *tgt)
  {
        struct fc_port *sess;
@@@ -1454,8 -1448,8 +1451,8 @@@ qlt_fc_port_deleted(struct scsi_qla_hos
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
  
        sess->local = 1;
-       qlt_schedule_sess_for_deletion(sess);
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+       qlt_schedule_sess_for_deletion(sess);
  }
  
  static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@@ -1515,10 -1509,8 +1512,8 @@@ int qlt_stop_phase1(struct qla_tgt *tgt
         * Lock is needed, because we still can get an incoming packet.
         */
        mutex_lock(&vha->vha_tgt.tgt_mutex);
-       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        tgt->tgt_stop = 1;
        qlt_clear_tgt_db(tgt);
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
        mutex_unlock(&qla_tgt_mutex);
  
@@@ -2031,7 -2023,7 +2026,7 @@@ static void qlt_24xx_handle_abts(struc
        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
        if (!sess) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
 -                  "qla_target(%d): task abort for non-existant session\n",
 +                  "qla_target(%d): task abort for non-existent session\n",
                    vha->vp_idx);
                spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
  
@@@ -2874,6 -2866,7 +2869,6 @@@ qlt_build_ctio_crc2_pkt(struct qla_qpai
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
 -      uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        struct ctio_crc2_to_fw  *pkt;
  
        /* Allocate CRC context from global pool */
        crc_ctx_pkt = cmd->ctx =
 -          dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
 +          dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
  
        if (!crc_ctx_pkt)
                goto crc_queuing_error;
  
 -      /* Zero out CTX area. */
 -      clr_ptr = (uint8_t *)crc_ctx_pkt;
 -      memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
 -
        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
  
@@@ -4869,8 -4866,6 +4864,6 @@@ static int qlt_24xx_handle_els(struct s
                                    sess);
                                qlt_send_term_imm_notif(vha, iocb, 1);
                                res = 0;
-                               spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
-                                   flags);
                                break;
                        }
  
@@@ -6297,11 -6292,10 +6290,11 @@@ static void qlt_lport_dump(struct scsi_
  /**
   * qla_tgt_lport_register - register lport with external module
   *
 - * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 - * @wwpn: Passwd FC target WWPN
 - * @callback:  lport initialization callback for tcm_qla2xxx code
   * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 + * @phys_wwpn:
 + * @npiv_wwpn:
 + * @npiv_wwnn:
 + * @callback:  lport initialization callback for tcm_qla2xxx code
   */
  int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
                       u64 npiv_wwpn, u64 npiv_wwnn,
index 589634358c83da1e7fc53aa50eb89b74b67d3387,fc2c97d9a0d60de85b6fbf97a2202b3fd0267928..94c14ce94da2477172c9a53252d1ed9a11020e42
@@@ -262,6 -262,24 +262,24 @@@ static struct iscsi_transport qla4xxx_i
  
  static struct scsi_transport_template *qla4xxx_scsi_transport;
  
+ static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
+ {
+       u32 reg_val = 0;
+       int rval = QLA_SUCCESS;
+       if (is_qla8022(ha))
+               reg_val = readl(&ha->qla4_82xx_reg->host_status);
+       else if (is_qla8032(ha) || is_qla8042(ha))
+               reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+       else
+               reg_val = readw(&ha->reg->ctrl_status);
+       if (reg_val == QL4_ISP_REG_DISCONNECT)
+               rval = QLA_ERROR;
+       return rval;
+ }
  static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
                             uint32_t iface_type, uint32_t payload_size,
                             uint32_t pid, struct sockaddr *dst_addr)
@@@ -825,10 -843,12 +843,10 @@@ static int qla4xxx_delete_chap(struct S
        uint32_t chap_size;
        int ret = 0;
  
 -      chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
 +      chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
        if (chap_table == NULL)
                return -ENOMEM;
  
 -      memset(chap_table, 0, sizeof(struct ql4_chap_table));
 -
        if (is_qla80XX(ha))
                max_chap_entries = (ha->hw.flt_chap_size / 2) /
                                   sizeof(struct ql4_chap_table);
@@@ -9184,10 -9204,17 +9202,17 @@@ static int qla4xxx_eh_abort(struct scsi
        struct srb *srb = NULL;
        int ret = SUCCESS;
        int wait = 0;
+       int rval;
  
        ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
                   ha->host_no, id, lun, cmd, cmd->cmnd[0]);
  
+       rval = qla4xxx_isp_check_reg(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+               return FAILED;
+       }
        spin_lock_irqsave(&ha->hardware_lock, flags);
        srb = (struct srb *) CMD_SP(cmd);
        if (!srb) {
@@@ -9239,6 -9266,7 +9264,7 @@@ static int qla4xxx_eh_device_reset(stru
        struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
        struct ddb_entry *ddb_entry = cmd->device->hostdata;
        int ret = FAILED, stat;
+       int rval;
  
        if (!ddb_entry)
                return ret;
                      cmd, jiffies, cmd->request->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));
  
+       rval = qla4xxx_isp_check_reg(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+               return FAILED;
+       }
        /* FIXME: wait for hba to go online */
        stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
        if (stat != QLA_SUCCESS) {
@@@ -9301,6 -9335,7 +9333,7 @@@ static int qla4xxx_eh_target_reset(stru
        struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
        struct ddb_entry *ddb_entry = cmd->device->hostdata;
        int stat, ret;
+       int rval;
  
        if (!ddb_entry)
                return FAILED;
                      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));
  
+       rval = qla4xxx_isp_check_reg(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+               return FAILED;
+       }
        stat = qla4xxx_reset_target(ha, ddb_entry);
        if (stat != QLA_SUCCESS) {
                starget_printk(KERN_INFO, scsi_target(cmd->device),
@@@ -9372,9 -9413,16 +9411,16 @@@ static int qla4xxx_eh_host_reset(struc
  {
        int return_status = FAILED;
        struct scsi_qla_host *ha;
+       int rval;
  
        ha = to_qla_host(cmd->device->host);
  
+       rval = qla4xxx_isp_check_reg(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+               return FAILED;
+       }
        if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
                qla4_83xx_set_idc_dontreset(ha);
  
index ac3b1c36c478fdca19e6fc37a86aada54e685e0d,ca53a5f785ee239cee9b7de7c10edf75b01ff2d8..946039117bf4f2c655db2bb1129ee75df1f1df21
@@@ -117,12 -117,6 +117,12 @@@ static int scsi_host_eh_past_deadline(s
  /**
   * scmd_eh_abort_handler - Handle command aborts
   * @work:     command to be aborted.
 + *
 + * Note: this function must be called only for a command that has timed out.
 + * Because the block layer marks a request as complete before it calls
 + * scsi_times_out(), a .scsi_done() call from the LLD for a command that has
 + * timed out do not have any effect. Hence it is safe to call
 + * scsi_finish_command() from this function.
   */
  void
  scmd_eh_abort_handler(struct work_struct *work)
@@@ -229,7 -223,8 +229,8 @@@ static void scsi_eh_reset(struct scsi_c
  
  static void scsi_eh_inc_host_failed(struct rcu_head *head)
  {
-       struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+       struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
+       struct Scsi_Host *shost = scmd->device->host;
        unsigned long flags;
  
        spin_lock_irqsave(shost->host_lock, flags);
@@@ -265,7 -260,7 +266,7 @@@ void scsi_eh_scmd_add(struct scsi_cmnd 
         * Ensure that all tasks observe the host state change before the
         * host_failed change.
         */
-       call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
+       call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
  }
  
  /**
@@@ -1894,7 -1889,7 +1895,7 @@@ int scsi_decide_disposition(struct scsi
        }
        return FAILED;
  
 -      maybe_retry:
 +maybe_retry:
  
        /* we requeue for retry because the error was retryable, and
         * the request was not marked fast fail.  Note that above,
diff --combined drivers/scsi/scsi_lib.c
index 393f9db8f41bf0eaa5f106af5722a122d923d112,c9844043504e18de606b07e87e0fc7ccabf65920..1d83f29aee740a4c971017d388e3233bdd2112f5
@@@ -79,15 -79,14 +79,15 @@@ int scsi_init_sense_cache(struct Scsi_H
        if (shost->unchecked_isa_dma) {
                scsi_sense_isadma_cache =
                        kmem_cache_create("scsi_sense_cache(DMA)",
 -                      SCSI_SENSE_BUFFERSIZE, 0,
 -                      SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
 +                              SCSI_SENSE_BUFFERSIZE, 0,
 +                              SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
                if (!scsi_sense_isadma_cache)
                        ret = -ENOMEM;
        } else {
                scsi_sense_cache =
 -                      kmem_cache_create("scsi_sense_cache",
 -                      SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
 +                      kmem_cache_create_usercopy("scsi_sense_cache",
 +                              SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
 +                              0, SCSI_SENSE_BUFFERSIZE, NULL);
                if (!scsi_sense_cache)
                        ret = -ENOMEM;
        }
@@@ -191,19 -190,7 +191,19 @@@ static void __scsi_queue_insert(struct 
         */
        cmd->result = 0;
        if (q->mq_ops) {
 -              scsi_mq_requeue_cmd(cmd);
 +              /*
 +               * Before a SCSI command is dispatched,
 +               * get_device(&sdev->sdev_gendev) is called and the host,
 +               * target and device busy counters are increased. Since
 +               * requeuing a request causes these actions to be repeated and
 +               * since scsi_device_unbusy() has already been called,
 +               * put_device(&device->sdev_gendev) must still be called. Call
 +               * put_device() after blk_mq_requeue_request() to avoid that
 +               * removal of the SCSI device can start before requeueing has
 +               * happened.
 +               */
 +              blk_mq_requeue_request(cmd->request, true);
 +              put_device(&device->sdev_gendev);
                return;
        }
        spin_lock_irqsave(q->queue_lock, flags);
@@@ -683,6 -670,7 +683,7 @@@ static bool scsi_end_request(struct req
        if (!blk_rq_is_scsi(req)) {
                WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
                cmd->flags &= ~SCMD_INITIALIZED;
+               destroy_rcu_head(&cmd->rcu);
        }
  
        if (req->mq_ctx) {
@@@ -732,6 -720,8 +733,8 @@@ static blk_status_t __scsi_error_from_h
                int result)
  {
        switch (host_byte(result)) {
+       case DID_OK:
+               return BLK_STS_OK;
        case DID_TRANSPORT_FAILFAST:
                return BLK_STS_TRANSPORT;
        case DID_TARGET_FAILURE:
@@@ -867,17 -857,6 +870,17 @@@ void scsi_io_completion(struct scsi_cmn
                /* for passthrough error may be set */
                error = BLK_STS_OK;
        }
 +      /*
 +       * Another corner case: the SCSI status byte is non-zero but 'good'.
 +       * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
 +       * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
 +       * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
 +       * intermediate statuses (both obsolete in SAM-4) as good.
 +       */
 +      if (status_byte(result) && scsi_status_is_good(result)) {
 +              result = 0;
 +              error = BLK_STS_OK;
 +      }
  
        /*
         * special case: failed zero length commands always need to
@@@ -1174,6 -1153,7 +1177,7 @@@ static void scsi_initialize_rq(struct r
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
  
        scsi_req_init(&cmd->req);
+       init_rcu_head(&cmd->rcu);
        cmd->jiffies_at_alloc = jiffies;
        cmd->retries = 0;
  }
@@@ -2007,8 -1987,6 +2011,8 @@@ static bool scsi_mq_get_budget(struct b
  out_put_device:
        put_device(&sdev->sdev_gendev);
  out:
 +      if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
 +              blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
        return false;
  }
  
@@@ -2070,9 -2048,9 +2074,9 @@@ out_put_budget
        case BLK_STS_OK:
                break;
        case BLK_STS_RESOURCE:
 -              if (atomic_read(&sdev->device_busy) == 0 &&
 -                  !scsi_device_blocked(sdev))
 -                      blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
 +              if (atomic_read(&sdev->device_busy) ||
 +                  scsi_device_blocked(sdev))
 +                      ret = BLK_STS_DEV_RESOURCE;
                break;
        default:
                /*
@@@ -2190,13 -2168,11 +2194,13 @@@ void __scsi_init_queue(struct Scsi_Hos
                q->limits.cluster = 0;
  
        /*
 -       * set a reasonable default alignment on word boundaries: the
 -       * host and device may alter it using
 -       * blk_queue_update_dma_alignment() later.
 +       * Set a reasonable default alignment:  The larger of 32-byte (dword),
 +       * which is a common minimum for HBAs, and the minimum DMA alignment,
 +       * which is set by the platform.
 +       *
 +       * Devices that require a bigger alignment can increase it later.
         */
 -      blk_queue_dma_alignment(q, 0x03);
 +      blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
  }
  EXPORT_SYMBOL_GPL(__scsi_init_queue);
  
@@@ -2630,7 -2606,7 +2634,7 @@@ scsi_test_unit_ready(struct scsi_devic
        /* try to eat the UNIT_ATTENTION if there are enough retries */
        do {
                result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
 -                                        timeout, retries, NULL);
 +                                        timeout, 1, NULL);
                if (sdev->removable && scsi_sense_valid(sshdr) &&
                    sshdr->sense_key == UNIT_ATTENTION)
                        sdev->changed = 1;
diff --combined drivers/scsi/sd.c
index bff21e636dddefba176ff6a435cd23863335c3f8,13ec1b5ef75c3ed208153acef2487f4969dfa198..1fa84d6a0f8b80b4a06af3f0dedede02d7f10403
@@@ -851,13 -851,16 +851,13 @@@ static int sd_setup_write_zeroes_cmnd(s
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
        u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
 -      int ret;
  
        if (!(rq->cmd_flags & REQ_NOUNMAP)) {
                switch (sdkp->zeroing_mode) {
                case SD_ZERO_WS16_UNMAP:
 -                      ret = sd_setup_write_same16_cmnd(cmd, true);
 -                      goto out;
 +                      return sd_setup_write_same16_cmnd(cmd, true);
                case SD_ZERO_WS10_UNMAP:
 -                      ret = sd_setup_write_same10_cmnd(cmd, true);
 -                      goto out;
 +                      return sd_setup_write_same10_cmnd(cmd, true);
                }
        }
  
                return BLKPREP_INVALID;
  
        if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
 -              ret = sd_setup_write_same16_cmnd(cmd, false);
 -      else
 -              ret = sd_setup_write_same10_cmnd(cmd, false);
 -
 -out:
 -      if (sd_is_zoned(sdkp) && ret == BLKPREP_OK)
 -              return sd_zbc_write_lock_zone(cmd);
 +              return sd_setup_write_same16_cmnd(cmd, false);
  
 -      return ret;
 +      return sd_setup_write_same10_cmnd(cmd, false);
  }
  
  static void sd_config_write_same(struct scsi_disk *sdkp)
@@@ -955,6 -964,12 +955,6 @@@ static int sd_setup_write_same_cmnd(str
  
        BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
  
 -      if (sd_is_zoned(sdkp)) {
 -              ret = sd_zbc_write_lock_zone(cmd);
 -              if (ret != BLKPREP_OK)
 -                      return ret;
 -      }
 -
        sector >>= ilog2(sdp->sector_size) - 9;
        nr_sectors >>= ilog2(sdp->sector_size) - 9;
  
        ret = scsi_init_io(cmd);
        rq->__data_len = nr_bytes;
  
 -      if (sd_is_zoned(sdkp) && ret != BLKPREP_OK)
 -              sd_zbc_write_unlock_zone(cmd);
 -
        return ret;
  }
  
@@@ -1018,12 -1036,19 +1018,12 @@@ static int sd_setup_read_write_cmnd(str
        sector_t threshold;
        unsigned int this_count = blk_rq_sectors(rq);
        unsigned int dif, dix;
 -      bool zoned_write = sd_is_zoned(sdkp) && rq_data_dir(rq) == WRITE;
        int ret;
        unsigned char protect;
  
 -      if (zoned_write) {
 -              ret = sd_zbc_write_lock_zone(SCpnt);
 -              if (ret != BLKPREP_OK)
 -                      return ret;
 -      }
 -
        ret = scsi_init_io(SCpnt);
        if (ret != BLKPREP_OK)
 -              goto out;
 +              return ret;
        WARN_ON_ONCE(SCpnt != rq->special);
  
        /* from here on until we're complete, any goto out
         */
        ret = BLKPREP_OK;
   out:
 -      if (zoned_write && ret != BLKPREP_OK)
 -              sd_zbc_write_unlock_zone(SCpnt);
 -
        return ret;
  }
  
@@@ -1284,16 -1312,17 +1284,16 @@@ static int sd_init_command(struct scsi_
  static void sd_uninit_command(struct scsi_cmnd *SCpnt)
  {
        struct request *rq = SCpnt->request;
 -
 -      if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
 -              sd_zbc_write_unlock_zone(SCpnt);
 +      u8 *cmnd;
  
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                __free_page(rq->special_vec.bv_page);
  
        if (SCpnt->cmnd != scsi_req(rq)->cmd) {
 -              mempool_free(SCpnt->cmnd, sd_cdb_pool);
 +              cmnd = SCpnt->cmnd;
                SCpnt->cmnd = NULL;
                SCpnt->cmd_len = 0;
 +              mempool_free(cmnd, sd_cdb_pool);
        }
  }
  
@@@ -2484,6 -2513,8 +2484,8 @@@ sd_read_capacity(struct scsi_disk *sdkp
                                sector_size = old_sector_size;
                                goto got_data;
                        }
+                       /* Remember that READ CAPACITY(16) succeeded */
+                       sdp->try_rc_10_first = 0;
                }
        }
  
@@@ -2595,6 -2626,7 +2597,7 @@@ sd_read_write_protect_flag(struct scsi_
        int res;
        struct scsi_device *sdp = sdkp->device;
        struct scsi_mode_data data;
+       int disk_ro = get_disk_ro(sdkp->disk);
        int old_wp = sdkp->write_prot;
  
        set_disk_ro(sdkp->disk, 0);
                          "Test WP failed, assume Write Enabled\n");
        } else {
                sdkp->write_prot = ((data.device_specific & 0x80) != 0);
-               set_disk_ro(sdkp->disk, sdkp->write_prot);
+               set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
                if (sdkp->first_scan || old_wp != sdkp->write_prot) {
                        sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
                                  sdkp->write_prot ? "on" : "off");
diff --combined drivers/scsi/sd_zbc.c
index 8f3669fd490d222d49be25e13912f5816d0b84df,9049a189c8e5f7035da8f0b6d92373548400f041..41df75eea57be7362e1be8f737d1bae9bac91977
@@@ -229,6 -229,17 +229,6 @@@ static inline sector_t sd_zbc_zone_sect
        return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
  }
  
 -/**
 - * sd_zbc_zone_no - Get the number of the zone conataining a sector.
 - * @sdkp: The target disk
 - * @sector: 512B sector address contained in the zone
 - */
 -static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
 -                                        sector_t sector)
 -{
 -      return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
 -}
 -
  /**
   * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
   * @cmd: the command to setup
@@@ -267,6 -278,78 +267,6 @@@ int sd_zbc_setup_reset_cmnd(struct scsi
        return BLKPREP_OK;
  }
  
 -/**
 - * sd_zbc_write_lock_zone - Write lock a sequential zone.
 - * @cmd: write command
 - *
 - * Called from sd_init_cmd() for write requests (standard write, write same or
 - * write zeroes operations). If the request target zone is not already locked,
 - * the zone is locked and BLKPREP_OK returned, allowing the request to proceed
 - * through dispatch in scsi_request_fn(). Otherwise, BLKPREP_DEFER is returned,
 - * forcing the request to wait for the zone to be unlocked, that is, for the
 - * previously issued write request targeting the same zone to complete.
 - *
 - * This is called from blk_peek_request() context with the queue lock held and
 - * before the request is removed from the scheduler. As a result, multiple
 - * contexts executing concurrently scsi_request_fn() cannot result in write
 - * sequence reordering as only a single write request per zone is allowed to
 - * proceed.
 - */
 -int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
 -{
 -      struct request *rq = cmd->request;
 -      struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 -      sector_t sector = blk_rq_pos(rq);
 -      sector_t zone_sectors = sd_zbc_zone_sectors(sdkp);
 -      unsigned int zno = sd_zbc_zone_no(sdkp, sector);
 -
 -      /*
 -       * Note: Checks of the alignment of the write command on
 -       * logical blocks is done in sd.c
 -       */
 -
 -      /* Do not allow zone boundaries crossing on host-managed drives */
 -      if (blk_queue_zoned_model(sdkp->disk->queue) == BLK_ZONED_HM &&
 -          (sector & (zone_sectors - 1)) + blk_rq_sectors(rq) > zone_sectors)
 -              return BLKPREP_KILL;
 -
 -      /*
 -       * Do not issue more than one write at a time per
 -       * zone. This solves write ordering problems due to
 -       * the unlocking of the request queue in the dispatch
 -       * path in the non scsi-mq case.
 -       */
 -      if (sdkp->zones_wlock &&
 -          test_and_set_bit(zno, sdkp->zones_wlock))
 -              return BLKPREP_DEFER;
 -
 -      WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK);
 -      cmd->flags |= SCMD_ZONE_WRITE_LOCK;
 -
 -      return BLKPREP_OK;
 -}
 -
 -/**
 - * sd_zbc_write_unlock_zone - Write unlock a sequential zone.
 - * @cmd: write command
 - *
 - * Called from sd_uninit_cmd(). Unlocking the request target zone will allow
 - * dispatching the next write request for the zone.
 - */
 -void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
 -{
 -      struct request *rq = cmd->request;
 -      struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 -
 -      if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) {
 -              unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
 -              WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
 -              cmd->flags &= ~SCMD_ZONE_WRITE_LOCK;
 -              clear_bit_unlock(zno, sdkp->zones_wlock);
 -              smp_mb__after_atomic();
 -      }
 -}
 -
  /**
   * sd_zbc_complete - ZBC command post processing.
   * @cmd: Completed command
@@@ -403,7 -486,7 +403,7 @@@ static int sd_zbc_check_capacity(struc
   */
  static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
  {
-       u64 zone_blocks;
+       u64 zone_blocks = 0;
        sector_t block = 0;
        unsigned char *buf;
        unsigned char *rec;
  
        /* Do a report zone to get the same field */
        ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
-       if (ret) {
-               zone_blocks = 0;
-               goto out;
-       }
+       if (ret)
+               goto out_free;
  
        same = buf[4] & 0x0f;
        if (same > 0) {
                        ret = sd_zbc_report_zones(sdkp, buf,
                                                  SD_ZBC_BUF_SIZE, block);
                        if (ret)
-                               return ret;
+                               goto out_free;
                }
  
        } while (block < sdkp->capacity);
        zone_blocks = sdkp->zone_blocks;
  
  out:
-       kfree(buf);
        if (!zone_blocks) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Devices with non constant zone "
                                  "size are not supported\n");
-               return -ENODEV;
-       }
-       if (!is_power_of_2(zone_blocks)) {
+               ret = -ENODEV;
+       } else if (!is_power_of_2(zone_blocks)) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Devices with non power of 2 zone "
                                  "size are not supported\n");
-               return -ENODEV;
-       }
-       if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+               ret = -ENODEV;
+       } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
                                  "Zone size too large\n");
-               return -ENODEV;
+               ret = -ENODEV;
+       } else {
+               sdkp->zone_blocks = zone_blocks;
+               sdkp->zone_shift = ilog2(zone_blocks);
        }
  
-       sdkp->zone_blocks = zone_blocks;
-       sdkp->zone_shift = ilog2(zone_blocks);
+ out_free:
+       kfree(buf);
  
-       return 0;
+       return ret;
  }
  
 +/**
 + * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
 + * @sdkp: The disk of the bitmap
 + */
 +static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
 +{
 +      struct request_queue *q = sdkp->disk->queue;
 +
 +      return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
 +                          * sizeof(unsigned long),
 +                          GFP_KERNEL, q->node);
 +}
 +
 +/**
 + * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
 + * @sdkp: disk used
 + * @buf: report reply buffer
 + * @buflen: length of @buf
 + * @seq_zones_bitmap: bitmap of sequential zones to set
 + *
 + * Parse reported zone descriptors in @buf to identify sequential zones and
 + * set the reported zone bit in @seq_zones_bitmap accordingly.
 + * Since read-only and offline zones cannot be written, do not
 + * mark them as sequential in the bitmap.
 + * Return the LBA after the last zone reported.
 + */
 +static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
 +                                   unsigned int buflen,
 +                                   unsigned long *seq_zones_bitmap)
 +{
 +      sector_t lba, next_lba = sdkp->capacity;
 +      unsigned int buf_len, list_length;
 +      unsigned char *rec;
 +      u8 type, cond;
 +
 +      list_length = get_unaligned_be32(&buf[0]) + 64;
 +      buf_len = min(list_length, buflen);
 +      rec = buf + 64;
 +
 +      while (rec < buf + buf_len) {
 +              type = rec[0] & 0x0f;
 +              cond = (rec[1] >> 4) & 0xf;
 +              lba = get_unaligned_be64(&rec[16]);
 +              if (type != ZBC_ZONE_TYPE_CONV &&
 +                  cond != ZBC_ZONE_COND_READONLY &&
 +                  cond != ZBC_ZONE_COND_OFFLINE)
 +                      set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
 +              next_lba = lba + get_unaligned_be64(&rec[8]);
 +              rec += 64;
 +      }
 +
 +      return next_lba;
 +}
 +
 +/**
 + * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
 + * @sdkp: target disk
 + *
 + * Allocate a zone bitmap and initialize it by identifying sequential zones.
 + */
 +static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
 +{
 +      struct request_queue *q = sdkp->disk->queue;
 +      unsigned long *seq_zones_bitmap;
 +      sector_t lba = 0;
 +      unsigned char *buf;
 +      int ret = -ENOMEM;
 +
 +      seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
 +      if (!seq_zones_bitmap)
 +              return -ENOMEM;
 +
 +      buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
 +      if (!buf)
 +              goto out;
 +
 +      while (lba < sdkp->capacity) {
 +              ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
 +              if (ret)
 +                      goto out;
 +              lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
 +                                         seq_zones_bitmap);
 +      }
 +
 +      if (lba != sdkp->capacity) {
 +              /* Something went wrong */
 +              ret = -EIO;
 +      }
 +
 +out:
 +      kfree(buf);
 +      if (ret) {
 +              kfree(seq_zones_bitmap);
 +              return ret;
 +      }
 +
 +      q->seq_zones_bitmap = seq_zones_bitmap;
 +
 +      return 0;
 +}
 +
 +static void sd_zbc_cleanup(struct scsi_disk *sdkp)
 +{
 +      struct request_queue *q = sdkp->disk->queue;
 +
 +      kfree(q->seq_zones_bitmap);
 +      q->seq_zones_bitmap = NULL;
 +
 +      kfree(q->seq_zones_wlock);
 +      q->seq_zones_wlock = NULL;
 +
 +      q->nr_zones = 0;
 +}
 +
  static int sd_zbc_setup(struct scsi_disk *sdkp)
  {
 +      struct request_queue *q = sdkp->disk->queue;
 +      int ret;
  
        /* READ16/WRITE16 is mandatory for ZBC disks */
        sdkp->device->use_16_for_rw = 1;
        sdkp->nr_zones =
                round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
  
 -      if (!sdkp->zones_wlock) {
 -              sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
 -                                          sizeof(unsigned long),
 -                                          GFP_KERNEL);
 -              if (!sdkp->zones_wlock)
 -                      return -ENOMEM;
 +      /*
 +       * Initialize the device request queue information if the number
 +       * of zones changed.
 +       */
 +      if (sdkp->nr_zones != q->nr_zones) {
 +
 +              sd_zbc_cleanup(sdkp);
 +
 +              q->nr_zones = sdkp->nr_zones;
 +              if (sdkp->nr_zones) {
 +                      q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
 +                      if (!q->seq_zones_wlock) {
 +                              ret = -ENOMEM;
 +                              goto err;
 +                      }
 +
 +                      ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
 +                      if (ret) {
 +                              sd_zbc_cleanup(sdkp);
 +                              goto err;
 +                      }
 +              }
 +
        }
  
        return 0;
 +
 +err:
 +      sd_zbc_cleanup(sdkp);
 +      return ret;
  }
  
  int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
  
  err:
        sdkp->capacity = 0;
 +      sd_zbc_cleanup(sdkp);
  
        return ret;
  }
  
  void sd_zbc_remove(struct scsi_disk *sdkp)
  {
 -      kfree(sdkp->zones_wlock);
 -      sdkp->zones_wlock = NULL;
 +      sd_zbc_cleanup(sdkp);
  }
  
  void sd_zbc_print_zones(struct scsi_disk *sdkp)
index 40fc7a590e81b6f9a6749dd4945a7f134e69364d,620510787763919a5b115aa1eef46f8e84aa4839..8c51d628b52edfd7e891182919fab16b469b0f3c
@@@ -953,11 -953,10 +953,11 @@@ static void storvsc_handle_error(struc
                case TEST_UNIT_READY:
                        break;
                default:
 -                      set_host_byte(scmnd, DID_TARGET_FAILURE);
 +                      set_host_byte(scmnd, DID_ERROR);
                }
                break;
        case SRB_STATUS_INVALID_LUN:
 +              set_host_byte(scmnd, DID_NO_CONNECT);
                do_work = true;
                process_err_fn = storvsc_remove_lun;
                break;
@@@ -1311,7 -1310,8 +1311,8 @@@ static int storvsc_do_io(struct hv_devi
                         */
                        cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
                                    cpumask_of_node(cpu_to_node(q_num)));
-                       for_each_cpu(tgt_cpu, &alloced_mask) {
+                       for_each_cpu_wrap(tgt_cpu, &alloced_mask,
+                                       outgoing_channel->target_cpu + 1) {
                                if (tgt_cpu != outgoing_channel->target_cpu) {
                                        outgoing_channel =
                                        stor_device->stor_chns[tgt_cpu];
@@@ -1657,7 -1657,7 +1658,7 @@@ static struct scsi_host_template scsi_d
        .eh_timed_out =         storvsc_eh_timed_out,
        .slave_alloc =          storvsc_device_alloc,
        .slave_configure =      storvsc_device_configure,
-       .cmd_per_lun =          255,
+       .cmd_per_lun =          2048,
        .this_id =              -1,
        .use_clustering =       ENABLE_CLUSTERING,
        /* Make sure we dont get a sg segment crosses a page boundary */
index facee2b97926337f5b6d4abd1f256f4979c6d525,8196976182c9201ba380495a6e3761f0703cabfb..c5b1bf1cadcb043390b3e8c6657f38692bea0774
  #include <linux/devfreq.h>
  #include <linux/nls.h>
  #include <linux/of.h>
 +#include <linux/bitfield.h>
  #include "ufshcd.h"
  #include "ufs_quirks.h"
  #include "unipro.h"
 +#include "ufs-sysfs.h"
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/ufs.h>
@@@ -152,7 -150,7 +152,7 @@@ enum 
  #define ufshcd_is_ufs_dev_poweroff(h) \
        ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
  
 -static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
 +struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
        {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
        {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
        {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
@@@ -525,7 -523,7 +525,7 @@@ int ufshcd_wait_for_register(struct ufs
  
  /**
   * ufshcd_get_intr_mask - Get the interrupt bit mask
 - * @hba - Pointer to adapter instance
 + * @hba: Pointer to adapter instance
   *
   * Returns interrupt bit mask per version
   */
@@@ -552,7 -550,7 +552,7 @@@ static inline u32 ufshcd_get_intr_mask(
  
  /**
   * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 - * @hba - Pointer to adapter instance
 + * @hba: Pointer to adapter instance
   *
   * Returns UFSHCI version supported by the controller
   */
@@@ -579,7 -577,7 +579,7 @@@ static inline bool ufshcd_is_device_pre
  
  /**
   * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 - * @lrb: pointer to local command reference block
 + * @lrbp: pointer to local command reference block
   *
   * This function is used to get the OCS field from UTRD
   * Returns the OCS field in the UTRD
@@@ -815,6 -813,28 +815,6 @@@ static inline bool ufshcd_is_hba_active
                ? false : true;
  }
  
 -static const char *ufschd_uic_link_state_to_string(
 -                      enum uic_link_state state)
 -{
 -      switch (state) {
 -      case UIC_LINK_OFF_STATE:        return "OFF";
 -      case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
 -      case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
 -      default:                        return "UNKNOWN";
 -      }
 -}
 -
 -static const char *ufschd_ufs_dev_pwr_mode_to_string(
 -                      enum ufs_dev_pwr_mode state)
 -{
 -      switch (state) {
 -      case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
 -      case UFS_SLEEP_PWR_MODE:        return "SLEEP";
 -      case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
 -      default:                        return "UNKNOWN";
 -      }
 -}
 -
  u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
  {
        /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
@@@ -1739,7 -1759,7 +1739,7 @@@ void ufshcd_send_command(struct ufs_hb
  
  /**
   * ufshcd_copy_sense_data - Copy sense data in case of check condition
 - * @lrb - pointer to local reference block
 + * @lrbp: pointer to local reference block
   */
  static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
  {
   * ufshcd_copy_query_response() - Copy the Query Response and the data
   * descriptor
   * @hba: per adapter instance
 - * @lrb - pointer to local reference block
 + * @lrbp: pointer to local reference block
   */
  static
  int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
@@@ -1862,7 -1882,7 +1862,7 @@@ ufshcd_dispatch_uic_cmd(struct ufs_hba 
  /**
   * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command
   * @hba: per adapter instance
 - * @uic_command: UIC command
 + * @uic_cmd: UIC command
   *
   * Must be called with mutex held.
   * Returns 0 only if success.
@@@ -1945,8 -1965,7 +1945,8 @@@ ufshcd_send_uic_cmd(struct ufs_hba *hba
  
  /**
   * ufshcd_map_sg - Map scatter-gather list to prdt
 - * @lrbp - pointer to local reference block
 + * @hba: per adapter instance
 + * @lrbp: pointer to local reference block
   *
   * Returns 0 in case of success, non-zero value in case of failure
   */
@@@ -2082,8 -2101,8 +2082,8 @@@ static void ufshcd_prepare_req_desc_hdr
  /**
   * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
   * for scsi commands
 - * @lrbp - local reference block pointer
 - * @upiu_flags - flags
 + * @lrbp: local reference block pointer
 + * @upiu_flags: flags
   */
  static
  void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
@@@ -2171,8 -2190,8 +2171,8 @@@ static inline void ufshcd_prepare_utp_n
  /**
   * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
   *                         for Device Management Purposes
 - * @hba - per adapter instance
 - * @lrb - pointer to local reference block
 + * @hba: per adapter instance
 + * @lrbp: pointer to local reference block
   */
  static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  {
  /**
   * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
   *                       for SCSI Purposes
 - * @hba - per adapter instance
 - * @lrb - pointer to local reference block
 + * @hba: per adapter instance
 + * @lrbp: pointer to local reference block
   */
  static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  {
        return ret;
  }
  
 -/*
 - * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 - * @scsi_lun: scsi LUN id
 - *
 - * Returns UPIU LUN id
 - */
 -static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
 -{
 -      if (scsi_is_wlun(scsi_lun))
 -              return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
 -                      | UFS_UPIU_WLUN_ID;
 -      else
 -              return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
 -}
 -
  /**
   * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 - * @scsi_lun: UPIU W-LUN id
 + * @upiu_wlun_id: UPIU W-LUN id
   *
   * Returns SCSI W-LUN id
   */
@@@ -2237,8 -2271,8 +2237,8 @@@ static inline u16 ufshcd_upiu_wlun_to_s
  
  /**
   * ufshcd_queuecommand - main entry point for SCSI requests
 + * @host: SCSI host pointer
   * @cmd: command from SCSI Midlayer
 - * @done: call back function
   *
   * Returns 0 for success, non-zero in case of failure
   */
@@@ -2479,7 -2513,7 +2479,7 @@@ static int ufshcd_wait_for_dev_cmd(stru
  /**
   * ufshcd_get_dev_cmd_tag - Get device management command tag
   * @hba: per-adapter instance
 - * @tag: pointer to variable with available slot value
 + * @tag_out: pointer to variable with available slot value
   *
   * Get a free slot and lock it until device management command
   * completes.
@@@ -2516,9 -2550,9 +2516,9 @@@ static inline void ufshcd_put_dev_cmd_t
  
  /**
   * ufshcd_exec_dev_cmd - API for sending device management requests
 - * @hba - UFS hba
 - * @cmd_type - specifies the type (NOP, Query...)
 - * @timeout - time in seconds
 + * @hba: UFS hba
 + * @cmd_type: specifies the type (NOP, Query...)
 + * @timeout: time in seconds
   *
   * NOTE: Since there is only one available tag for device management commands,
   * it is expected you hold the hba->dev_cmd.lock mutex.
@@@ -2615,10 -2649,10 +2615,10 @@@ static int ufshcd_query_flag_retry(stru
  
  /**
   * ufshcd_query_flag() - API function for sending flag query requests
 - * hba: per-adapter instance
 - * query_opcode: flag query to perform
 - * idn: flag idn to access
 - * flag_res: the flag value after the query request completes
 + * @hba: per-adapter instance
 + * @opcode: flag query to perform
 + * @idn: flag idn to access
 + * @flag_res: the flag value after the query request completes
   *
   * Returns 0 for success, non-zero in case of failure
   */
@@@ -2682,17 -2716,17 +2682,17 @@@ out_unlock
  
  /**
   * ufshcd_query_attr - API function for sending attribute requests
 - * hba: per-adapter instance
 - * opcode: attribute opcode
 - * idn: attribute idn to access
 - * index: index field
 - * selector: selector field
 - * attr_val: the attribute value after the query request completes
 + * @hba: per-adapter instance
 + * @opcode: attribute opcode
 + * @idn: attribute idn to access
 + * @index: index field
 + * @selector: selector field
 + * @attr_val: the attribute value after the query request completes
   *
   * Returns 0 for success, non-zero in case of failure
  */
 -static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 -                      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
 +int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 +                    enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
  {
        struct ufs_query_req *request = NULL;
        struct ufs_query_res *response = NULL;
  }
  
  /**
 - * ufshcd_query_descriptor_retry - API function for sending descriptor
 - * requests
 - * hba: per-adapter instance
 - * opcode: attribute opcode
 - * idn: attribute idn to access
 - * index: index field
 - * selector: selector field
 - * desc_buf: the buffer that contains the descriptor
 - * buf_len: length parameter passed to the device
 + * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 + * @hba: per-adapter instance
 + * @opcode: attribute opcode
 + * @idn: attribute idn to access
 + * @index: index field
 + * @selector: selector field
 + * @desc_buf: the buffer that contains the descriptor
 + * @buf_len: length parameter passed to the device
   *
   * Returns 0 for success, non-zero in case of failure.
   * The buf_len parameter will contain, on return, the length parameter
   * received on the response.
   */
 -static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
 -                                       enum query_opcode opcode,
 -                                       enum desc_idn idn, u8 index,
 -                                       u8 selector,
 -                                       u8 *desc_buf, int *buf_len)
 +int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
 +                                enum query_opcode opcode,
 +                                enum desc_idn idn, u8 index,
 +                                u8 selector,
 +                                u8 *desc_buf, int *buf_len)
  {
        int err;
        int retries;
@@@ -2952,9 -2987,6 +2952,9 @@@ int ufshcd_map_desc_id_to_length(struc
        case QUERY_DESC_IDN_STRING:
                *desc_len = QUERY_DESC_MAX_SIZE;
                break;
 +      case QUERY_DESC_IDN_HEALTH:
 +              *desc_len = hba->desc_size.hlth_desc;
 +              break;
        case QUERY_DESC_IDN_RFU_0:
        case QUERY_DESC_IDN_RFU_1:
                *desc_len = 0;
@@@ -2978,12 -3010,12 +2978,12 @@@ EXPORT_SYMBOL(ufshcd_map_desc_id_to_len
   *
   * Return 0 in case of success, non-zero otherwise
   */
 -static int ufshcd_read_desc_param(struct ufs_hba *hba,
 -                                enum desc_idn desc_id,
 -                                int desc_index,
 -                                u8 param_offset,
 -                                u8 *param_read_buf,
 -                                u8 param_size)
 +int ufshcd_read_desc_param(struct ufs_hba *hba,
 +                         enum desc_idn desc_id,
 +                         int desc_index,
 +                         u8 param_offset,
 +                         u8 *param_read_buf,
 +                         u8 param_size)
  {
        int ret;
        u8 *desc_buf;
@@@ -3078,8 -3110,9 +3078,8 @@@ static int ufshcd_read_device_desc(stru
   *
   * Return 0 in case of success, non-zero otherwise
   */
 -#define ASCII_STD true
 -static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
 -                                 u8 *buf, u32 size, bool ascii)
 +int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
 +                          u8 *buf, u32 size, bool ascii)
  {
        int err = 0;
  
@@@ -3156,7 -3189,7 +3156,7 @@@ static inline int ufshcd_read_unit_desc
         * Unit descriptors are only available for general purpose LUs (LUN id
         * from 0 to 7) and RPMB Well known LU.
         */
 -      if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
 +      if (!ufs_is_valid_unit_desc_lun(lun))
                return -EOPNOTSUPP;
  
        return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@@ -3709,18 -3742,6 +3709,18 @@@ static int ufshcd_uic_hibern8_exit(stru
        return ret;
  }
  
 +static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
 +{
 +      unsigned long flags;
 +
 +      if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
 +              return;
 +
 +      spin_lock_irqsave(hba->host->host_lock, flags);
 +      ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
 +      spin_unlock_irqrestore(hba->host->host_lock, flags);
 +}
 +
   /**
   * ufshcd_init_pwr_info - setting the POR (power on reset)
   * values in hba power info
@@@ -3891,7 -3912,7 +3891,7 @@@ static int ufshcd_config_pwr_mode(struc
  
  /**
   * ufshcd_complete_dev_init() - checks device readiness
 - * hba: per-adapter instance
 + * @hba: per-adapter instance
   *
   * Set fDeviceInit flag and poll until device toggles it.
   */
@@@ -4331,6 -4352,8 +4331,8 @@@ static int ufshcd_slave_alloc(struct sc
        /* REPORT SUPPORTED OPERATION CODES is not supported */
        sdev->no_report_opcodes = 1;
  
+       /* WRITE_SAME command is not supported */
+       sdev->no_write_same = 1;
  
        ufshcd_set_queue_depth(sdev);
  
@@@ -4430,7 -4453,7 +4432,7 @@@ static int ufshcd_task_req_compl(struc
  
  /**
   * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 - * @lrb: pointer to local reference block of completed command
 + * @lrbp: pointer to local reference block of completed command
   * @scsi_status: SCSI command status
   *
   * Returns value base on SCSI command status
@@@ -4465,7 -4488,7 +4467,7 @@@ ufshcd_scsi_cmd_status(struct ufshcd_lr
  /**
   * ufshcd_transfer_rsp_status - Get overall status of the response
   * @hba: per adapter instance
 - * @lrb: pointer to local reference block of completed command
 + * @lrbp: pointer to local reference block of completed command
   *
   * Returns result of the command to notify SCSI midlayer
   */
@@@ -5773,7 -5796,7 +5775,7 @@@ static int ufshcd_reset_and_restore(str
  
  /**
   * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 - * @cmd - SCSI command pointer
 + * @cmd: SCSI command pointer
   *
   * Returns SUCCESS/FAILED
   */
@@@ -5958,11 -5981,11 +5960,11 @@@ static void ufshcd_init_icc_levels(stru
   * will take effect only when its sent to "UFS device" well known logical unit
   * hence we require the scsi_device instance to represent this logical unit in
   * order for the UFS host driver to send the SSU command for power management.
 -
 + *
   * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
   * Block) LU so user space process can control this LU. User space may also
   * want to have access to BOOT LU.
 -
 + *
   * This function adds scsi device instances for each of all well known LUs
   * (except "REPORT LUNS" LU).
   *
@@@ -6031,7 -6054,7 +6033,7 @@@ static int ufs_get_device_desc(struct u
        model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
  
        err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
 -                              QUERY_DESC_MAX_SIZE, ASCII_STD);
 +                                    QUERY_DESC_MAX_SIZE, true/*ASCII*/);
        if (err) {
                dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
                        __func__, err);
@@@ -6277,10 -6300,6 +6279,10 @@@ static void ufshcd_init_desc_sizes(stru
                &hba->desc_size.geom_desc);
        if (err)
                hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
 +      err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
 +              &hba->desc_size.hlth_desc);
 +      if (err)
 +              hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
  }
  
  static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
        hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
        hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
        hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
 +      hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
  }
  
  /**
@@@ -6320,9 -6338,6 +6322,9 @@@ static int ufshcd_probe_hba(struct ufs_
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
  
 +      /* Enable Auto-Hibernate if configured */
 +      ufshcd_auto_hibern8_enable(hba);
 +
        ret = ufshcd_verify_dev_init(hba);
        if (ret)
                goto out;
@@@ -6481,12 -6496,6 +6483,12 @@@ static enum blk_eh_timer_return ufshcd_
        return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
  }
  
 +static const struct attribute_group *ufshcd_driver_groups[] = {
 +      &ufs_sysfs_unit_descriptor_group,
 +      &ufs_sysfs_lun_attributes_group,
 +      NULL,
 +};
 +
  static struct scsi_host_template ufshcd_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = UFSHCD,
        .can_queue              = UFSHCD_CAN_QUEUE,
        .max_host_blocked       = 1,
        .track_queue_depth      = 1,
 +      .sdev_groups            = ufshcd_driver_groups,
  };
  
  static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@@ -6553,15 -6561,12 +6555,15 @@@ static int ufshcd_config_vreg(struct de
                struct ufs_vreg *vreg, bool on)
  {
        int ret = 0;
 -      struct regulator *reg = vreg->reg;
 -      const char *name = vreg->name;
 +      struct regulator *reg;
 +      const char *name;
        int min_uV, uA_load;
  
        BUG_ON(!vreg);
  
 +      reg = vreg->reg;
 +      name = vreg->name;
 +
        if (regulator_count_voltages(reg) > 0) {
                min_uV = on ? vreg->min_uV : 0;
                ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
@@@ -7407,10 -7412,6 +7409,10 @@@ static int ufshcd_resume(struct ufs_hb
  
        /* Schedule clock gating in case of no access to UFS device yet */
        ufshcd_release(hba);
 +
 +      /* Enable Auto-Hibernate if configured */
 +      ufshcd_auto_hibern8_enable(hba);
 +
        goto out;
  
  set_old_link_state:
@@@ -7432,6 -7433,7 +7434,6 @@@ out
  /**
   * ufshcd_system_suspend - system suspend routine
   * @hba: per adapter instance
 - * @pm_op: runtime PM or system PM
   *
   * Check the description of ufshcd_suspend() function for more details.
   *
@@@ -7582,6 -7584,133 +7584,6 @@@ int ufshcd_runtime_idle(struct ufs_hba 
  }
  EXPORT_SYMBOL(ufshcd_runtime_idle);
  
 -static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
 -                                         struct device_attribute *attr,
 -                                         const char *buf, size_t count,
 -                                         bool rpm)
 -{
 -      struct ufs_hba *hba = dev_get_drvdata(dev);
 -      unsigned long flags, value;
 -
 -      if (kstrtoul(buf, 0, &value))
 -              return -EINVAL;
 -
 -      if (value >= UFS_PM_LVL_MAX)
 -              return -EINVAL;
 -
 -      spin_lock_irqsave(hba->host->host_lock, flags);
 -      if (rpm)
 -              hba->rpm_lvl = value;
 -      else
 -              hba->spm_lvl = value;
 -      spin_unlock_irqrestore(hba->host->host_lock, flags);
 -      return count;
 -}
 -
 -static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
 -              struct device_attribute *attr, char *buf)
 -{
 -      struct ufs_hba *hba = dev_get_drvdata(dev);
 -      int curr_len;
 -      u8 lvl;
 -
 -      curr_len = snprintf(buf, PAGE_SIZE,
 -                          "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
 -                          hba->rpm_lvl,
 -                          ufschd_ufs_dev_pwr_mode_to_string(
 -                              ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
 -                          ufschd_uic_link_state_to_string(
 -                              ufs_pm_lvl_states[hba->rpm_lvl].link_state));
 -
 -      curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 -                           "\nAll available Runtime PM levels info:\n");
 -      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
 -              curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 -                                   "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
 -                                  lvl,
 -                                  ufschd_ufs_dev_pwr_mode_to_string(
 -                                      ufs_pm_lvl_states[lvl].dev_state),
 -                                  ufschd_uic_link_state_to_string(
 -                                      ufs_pm_lvl_states[lvl].link_state));
 -
 -      return curr_len;
 -}
 -
 -static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
 -              struct device_attribute *attr, const char *buf, size_t count)
 -{
 -      return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
 -}
 -
 -static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
 -{
 -      hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
 -      hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
 -      sysfs_attr_init(&hba->rpm_lvl_attr.attr);
 -      hba->rpm_lvl_attr.attr.name = "rpm_lvl";
 -      hba->rpm_lvl_attr.attr.mode = 0644;
 -      if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
 -              dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
 -}
 -
 -static ssize_t ufshcd_spm_lvl_show(struct device *dev,
 -              struct device_attribute *attr, char *buf)
 -{
 -      struct ufs_hba *hba = dev_get_drvdata(dev);
 -      int curr_len;
 -      u8 lvl;
 -
 -      curr_len = snprintf(buf, PAGE_SIZE,
 -                          "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
 -                          hba->spm_lvl,
 -                          ufschd_ufs_dev_pwr_mode_to_string(
 -                              ufs_pm_lvl_states[hba->spm_lvl].dev_state),
 -                          ufschd_uic_link_state_to_string(
 -                              ufs_pm_lvl_states[hba->spm_lvl].link_state));
 -
 -      curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 -                           "\nAll available System PM levels info:\n");
 -      for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
 -              curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
 -                                   "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
 -                                  lvl,
 -                                  ufschd_ufs_dev_pwr_mode_to_string(
 -                                      ufs_pm_lvl_states[lvl].dev_state),
 -                                  ufschd_uic_link_state_to_string(
 -                                      ufs_pm_lvl_states[lvl].link_state));
 -
 -      return curr_len;
 -}
 -
 -static ssize_t ufshcd_spm_lvl_store(struct device *dev,
 -              struct device_attribute *attr, const char *buf, size_t count)
 -{
 -      return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
 -}
 -
 -static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
 -{
 -      hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
 -      hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
 -      sysfs_attr_init(&hba->spm_lvl_attr.attr);
 -      hba->spm_lvl_attr.attr.name = "spm_lvl";
 -      hba->spm_lvl_attr.attr.mode = 0644;
 -      if (device_create_file(hba->dev, &hba->spm_lvl_attr))
 -              dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
 -}
 -
 -static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
 -{
 -      ufshcd_add_rpm_lvl_sysfs_nodes(hba);
 -      ufshcd_add_spm_lvl_sysfs_nodes(hba);
 -}
 -
 -static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
 -{
 -      device_remove_file(hba->dev, &hba->rpm_lvl_attr);
 -      device_remove_file(hba->dev, &hba->spm_lvl_attr);
 -}
 -
  /**
   * ufshcd_shutdown - shutdown routine
   * @hba: per adapter instance
@@@ -7615,11 -7744,11 +7617,11 @@@ EXPORT_SYMBOL(ufshcd_shutdown)
  /**
   * ufshcd_remove - de-allocate SCSI host and host memory space
   *            data structure memory
 - * @hba - per adapter instance
 + * @hba: per adapter instance
   */
  void ufshcd_remove(struct ufs_hba *hba)
  {
 -      ufshcd_remove_sysfs_nodes(hba);
 +      ufs_sysfs_remove_nodes(hba->dev);
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
@@@ -7854,12 -7983,6 +7856,12 @@@ int ufshcd_init(struct ufs_hba *hba, vo
                                                UFS_SLEEP_PWR_MODE,
                                                UIC_LINK_HIBERN8_STATE);
  
 +      /* Set the default auto-hiberate idle timer value to 150 ms */
 +      if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
 +              hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 +                          FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
 +      }
 +
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
  
        ufshcd_set_ufs_dev_active(hba);
  
        async_schedule(ufshcd_async_scan, hba);
 -      ufshcd_add_sysfs_nodes(hba);
 +      ufs_sysfs_add_nodes(hba->dev);
  
        return 0;
  
diff --combined include/scsi/scsi_cmnd.h
index d8d4a902a88dedbc93ac8da1ca99bb5f3d394d65,0382ceab2ebab8873647e6bff2d2c68c72ec9576..2280b2351739572c5db73579f8ffc0e16d511ebe
@@@ -58,7 -58,8 +58,7 @@@ struct scsi_pointer 
  /* for scmd->flags */
  #define SCMD_TAGGED           (1 << 0)
  #define SCMD_UNCHECKED_ISA_DMA        (1 << 1)
 -#define SCMD_ZONE_WRITE_LOCK  (1 << 2)
 -#define SCMD_INITIALIZED      (1 << 3)
 +#define SCMD_INITIALIZED      (1 << 2)
  /* flags preserved across unprep / reprep */
  #define SCMD_PRESERVED_FLAGS  (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
  
@@@ -68,6 -69,9 +68,9 @@@ struct scsi_cmnd 
        struct list_head list;  /* scsi_cmnd participates in queue lists */
        struct list_head eh_entry; /* entry for the host eh_cmd_q */
        struct delayed_work abort_work;
+       struct rcu_head rcu;
        int eh_eflags;          /* Used by error handlr */
  
        /*
diff --combined include/scsi/scsi_host.h
index 4e418fb539f8835254705f3e40a0a312f4829117,9c1e4bad6581d39bb4bcafc9205b38ed43e541cf..12f454cb6f610823515aaed861eb9d3df97c6d90
@@@ -51,6 -51,21 +51,6 @@@ struct scsi_host_template 
        struct module *module;
        const char *name;
  
 -      /*
 -       * Used to initialize old-style drivers.  For new-style drivers
 -       * just perform all work in your module initialization function.
 -       *
 -       * Status:  OBSOLETE
 -       */
 -      int (* detect)(struct scsi_host_template *);
 -
 -      /*
 -       * Used as unload callback for hosts with old-style drivers.
 -       *
 -       * Status: OBSOLETE
 -       */
 -      int (* release)(struct Scsi_Host *);
 -
        /*
         * The info function will return whatever useful information the
         * developer sees fit.  If not provided, then the name field will
        /* True if the controller does not support WRITE SAME */
        unsigned no_write_same:1;
  
+       /* True if the low-level driver supports blk-mq only */
+       unsigned force_blk_mq:1;
        /*
         * Countdown for host blocking with no commands outstanding.
         */
        struct device_attribute **sdev_attrs;
  
        /*
 -       * List of hosts per template.
 -       *
 -       * This is only for use by scsi_module.c for legacy templates.
 -       * For these access to it is synchronized implicitly by
 -       * module_init/module_exit.
 +       * Pointer to the SCSI device attribute groups for this host,
 +       * NULL terminated.
         */
 -      struct list_head legacy_hosts;
 +      const struct attribute_group **sdev_groups;
  
        /*
         * Vendor Identifier associated with the host
@@@ -553,8 -574,6 +556,6 @@@ struct Scsi_Host 
                struct blk_mq_tag_set   tag_set;
        };
  
-       struct rcu_head rcu;
        atomic_t host_busy;                /* commands actually active on low-level */
        atomic_t host_blocked;
  
        /* ldm bits */
        struct device           shost_gendev, shost_dev;
  
 -      /*
 -       * List of hosts per template.
 -       *
 -       * This is only for use by scsi_module.c for legacy templates.
 -       * For these access to it is synchronized implicitly by
 -       * module_init/module_exit.
 -       */
 -      struct list_head sht_legacy_list;
 -
        /*
         * Points to the transport data (if any) which is allocated
         * separately
@@@ -889,6 -917,9 +890,6 @@@ static inline unsigned char scsi_host_g
        return shost->prot_guard_type;
  }
  
 -/* legacy interfaces */
 -extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
 -extern void scsi_unregister(struct Scsi_Host *);
  extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
  
  #endif /* _SCSI_SCSI_HOST_H */
This page took 0.332894 seconds and 4 git commands to generate.