obj-$(CONFIG_SCSI_PM8001) += pm8001/
obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
-obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o
obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o
-obj-$(CONFIG_SCSI_SYM53C416) += sym53c416.o
obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/
obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
-obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
-obj-$(CONFIG_SCSI_EATA) += eata.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
obj-$(CONFIG_CXLFLASH) += cxlflash/
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
-oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h
info = &aac->hba_map[bus][cid];
if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
- fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
}
}
aac->cardtype = index;
INIT_LIST_HEAD(&aac->entry);
+ if (aac_reset_devices || reset_devices)
+ aac->init_reset = true;
+
aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
if (!aac->fibs)
goto out_free_host;
* Map in the registers from the adapter.
*/
aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
- if ((*aac_drivers[index].init)(aac))
+ if ((*aac_drivers[index].init)(aac)) {
+ error = -ENODEV;
goto out_unmap;
+ }
if (aac->sync_mode) {
if (aac_sync_mode)
static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
{
- struct csio_lnode *ln = hw->rln;
+ struct csio_lnode *ln;
struct list_head *tmp;
/* Match siblings lnode with portid */
val = htonl(FC_PORTSPEED_1GBIT);
else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
val = htonl(FC_PORTSPEED_10GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G)
+ val = htonl(FC_PORTSPEED_25GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G)
+ val = htonl(FC_PORTSPEED_40GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G)
+ val = htonl(FC_PORTSPEED_50GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G)
+ val = htonl(FC_PORTSPEED_100GBIT);
else
val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
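The if/else speed ladder above grows with every new link speed; a table-driven sketch of the same mapping (hedged: csio_fdmi_speeds is a hypothetical name, using the FW_PORT_CAP32_SPEED_* and FC_PORTSPEED_* constants from the hunk):

static const struct {
	u32 fw_speed;	/* FW_PORT_CAP32_SPEED_* */
	u32 fc_speed;	/* FC_PORTSPEED_* */
} csio_fdmi_speeds[] = {
	{ FW_PORT_CAP32_SPEED_25G,  FC_PORTSPEED_25GBIT },
	{ FW_PORT_CAP32_SPEED_40G,  FC_PORTSPEED_40GBIT },
	{ FW_PORT_CAP32_SPEED_50G,  FC_PORTSPEED_50GBIT },
	{ FW_PORT_CAP32_SPEED_100G, FC_PORTSPEED_100GBIT },
};

	int i;

	val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
	for (i = 0; i < ARRAY_SIZE(csio_fdmi_speeds); i++) {
		if (hw->pport[ln->portid].link_speed ==
		    csio_fdmi_speeds[i].fw_speed) {
			val = htonl(csio_fdmi_speeds[i].fc_speed);
			break;
		}
	}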
static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
- u8 cdb[COMMAND_SIZE(MAINTENANCE_IN)];
+ u8 cdb[MAX_COMMAND_SIZE];
int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the command. */
- memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_IN));
+ memset(cdb, 0x0, MAX_COMMAND_SIZE);
cdb[0] = MAINTENANCE_IN;
if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
static int submit_stpg(struct scsi_device *sdev, int group_id,
struct scsi_sense_hdr *sshdr)
{
- u8 cdb[COMMAND_SIZE(MAINTENANCE_OUT)];
+ u8 cdb[MAX_COMMAND_SIZE];
unsigned char stpg_data[8];
int stpg_len = 8;
int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
put_unaligned_be16(group_id, &stpg_data[6]);
/* Prepare the command. */
- memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_OUT));
+ memset(cdb, 0x0, MAX_COMMAND_SIZE);
cdb[0] = MAINTENANCE_OUT;
cdb[1] = MO_SET_TARGET_PGS;
put_unaligned_be32(stpg_len, &cdb[6]);
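For context on the cdb[] changes above: COMMAND_SIZE() expands to a runtime lookup in scsi_command_size_tbl[], so the old declarations were variable-length arrays, while MAX_COMMAND_SIZE is a fixed 16 bytes. A minimal sketch of the difference, assuming the usual definition from <scsi/scsi_common.h>:

/* Runtime table lookup, not a compile-time constant, hence a VLA: */
#define COMMAND_SIZE(opcode) scsi_command_size_tbl[((opcode) >> 5) & 7]

u8 vla_cdb[COMMAND_SIZE(MAINTENANCE_IN)];	/* length known only at run time */
u8 fixed_cdb[MAX_COMMAND_SIZE];			/* always 16 bytes */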
/*
* alua_alloc_pg - Allocate a new port_group structure
* @sdev: scsi device
- * @h: alua device_handler data
* @group_id: port group id
+ * @tpgs: target port group settings
*
* Allocate a new port_group structure for a given
* device.
/**
* alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ * @pg: ALUA port group associated with @sdev.
+ * @sdev: SCSI device for which to submit an RTPG.
+ * @qdata: Information about the callback to invoke after the RTPG.
+ * @force: Whether or not to submit an RTPG if a work item that will submit an
+ * RTPG already has been scheduled.
*
* Returns true if and only if alua_rtpg_work() will be called asynchronously.
* That function is responsible for calling @qdata->fn().
#include "scsi_logging.h"
+static int shost_eh_deadline = -1;
+
+module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(eh_deadline,
+ "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");
+
static DEFINE_IDA(host_index_ida);
scsi_host_state_name(state)));
return -EINVAL;
}
-EXPORT_SYMBOL(scsi_host_set_state);
/**
* scsi_remove_host - remove a scsi host
if (shost->work_q)
destroy_workqueue(shost->work_q);
- destroy_rcu_head(&shost->rcu);
-
if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
kfree(shost);
}
-static int shost_eh_deadline = -1;
-
-module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(eh_deadline,
- "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");
-
static struct device_type scsi_host_type = {
.name = "scsi_host",
.release = scsi_host_dev_release,
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- init_rcu_head(&shost->rcu);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)
shost->dma_boundary = 0xffffffff;
- shost->use_blk_mq = scsi_use_blk_mq;
+ shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
}
EXPORT_SYMBOL(scsi_host_alloc);
-struct Scsi_Host *scsi_register(struct scsi_host_template *sht, int privsize)
-{
- struct Scsi_Host *shost = scsi_host_alloc(sht, privsize);
-
- if (!sht->detect) {
- printk(KERN_WARNING "scsi_register() called on new-style "
- "template for driver %s\n", sht->name);
- dump_stack();
- }
-
- if (shost)
- list_add_tail(&shost->sht_legacy_list, &sht->legacy_hosts);
- return shost;
-}
-EXPORT_SYMBOL(scsi_register);
-
-void scsi_unregister(struct Scsi_Host *shost)
-{
- list_del(&shost->sht_legacy_list);
- scsi_host_put(shost);
-}
-EXPORT_SYMBOL(scsi_unregister);
-
static int __scsi_host_match(struct device *dev, const void *data)
{
struct Scsi_Host *p;
return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
}
-static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
-static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
-static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(lunid);
+static DEVICE_ATTR_RO(unique_id);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
+static DEVICE_ATTR_RO(sas_address);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
host_show_hp_ssd_smart_path_enabled, NULL);
-static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
+static DEVICE_ATTR_RO(path_info);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
host_show_hp_ssd_smart_path_status,
host_store_hp_ssd_smart_path_status);
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
if (unlikely(!h->msix_vectors))
return;
- if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
- c->Header.ReplyQueue =
- raw_smp_processor_id() % h->nreply_queues;
- else
- c->Header.ReplyQueue = reply_queue % h->nreply_queues;
+ c->Header.ReplyQueue = reply_queue;
}
}
* Tell the controller to post the reply to the queue for this
* processor. This seems to give the best I/O throughput.
*/
- if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
- cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
- else
- cp->ReplyQueue = reply_queue % h->nreply_queues;
+ cp->ReplyQueue = reply_queue;
/*
* Set the bits in the address sent down to include:
* - performant mode bit (bit 0)
/* Tell the controller to post the reply to the queue for this
* processor. This seems to give the best I/O throughput.
*/
- if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
- cp->reply_queue = smp_processor_id() % h->nreply_queues;
- else
- cp->reply_queue = reply_queue % h->nreply_queues;
+ cp->reply_queue = reply_queue;
/* Set the bits in the address sent down to include:
* - performant mode bit not used in ioaccel mode 2
* - pull count (bits 0-3)
* Tell the controller to post the reply to the queue for this
* processor. This seems to give the best I/O throughput.
*/
- if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
- cp->reply_queue = smp_processor_id() % h->nreply_queues;
- else
- cp->reply_queue = reply_queue % h->nreply_queues;
+ cp->reply_queue = reply_queue;
/*
* Set the bits in the address sent down to include:
* - performant mode bit not used in ioaccel mode 2
{
dial_down_lockup_detection_during_fw_flash(h, c);
atomic_inc(&h->commands_outstanding);
+
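+ /*
+ * reply_map[] is filled from pci_irq_get_affinity() in
+ * hpsa_setup_reply_map() at init time, so this lookup replaces the old
+ * per-command raw_smp_processor_id() % h->nreply_queues computation.
+ */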
+ reply_queue = h->reply_map[raw_smp_processor_id()];
switch (c->cmd_type) {
case CMD_IOACCEL1:
set_ioaccel1_performant_mode(h, c, reply_queue);
h->msix_vectors = 0;
}
+static void hpsa_setup_reply_map(struct ctlr_info *h)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < h->msix_vectors; queue++) {
+ mask = pci_irq_get_affinity(h->pdev, queue);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ h->reply_map[cpu] = queue;
+ }
+ return;
+
+fallback:
+ for_each_possible_cpu(cpu)
+ h->reply_map[cpu] = 0;
+}
+
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
* controllers that are capable. If not, we use legacy INTx mode.
*/
err = hpsa_interrupt_mode(h);
if (err)
goto clean1;
+
+ /* setup mapping between CPU and reply queue */
+ hpsa_setup_reply_map(h);
+
err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
if (err)
goto clean2; /* intmode+region, pci */
return wq;
}
+static void hpda_free_ctlr_info(struct ctlr_info *h)
+{
+ kfree(h->reply_map);
+ kfree(h);
+}
+
+static struct ctlr_info *hpda_alloc_ctlr_info(void)
+{
+ struct ctlr_info *h;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return NULL;
+
+ h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL);
+ if (!h->reply_map) {
+ kfree(h);
+ return NULL;
+ }
+ return h;
+}
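A side note on the reply_map allocation above: kcalloc() would express the same thing while guarding the nr_cpu_ids multiplication against overflow. A sketch of that alternative:

	h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
	if (!h->reply_map) {
		kfree(h);
		return NULL;
	}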
+
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int dac, rc;
* the driver. See comments in hpsa.h for more info.
*/
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
- h = kzalloc(sizeof(*h), GFP_KERNEL);
+ h = hpda_alloc_ctlr_info();
if (!h) {
dev_err(&pdev->dev, "Failed to allocate controller head\n");
return -ENOMEM;
h->lockup_detected = NULL; /* init_one 2 */
/* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
- kfree(h); /* init_one 1 */
+ hpda_free_ctlr_info(h); /* init_one 1 */
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task = TO_SAS_TASK(cmd);
/* At this point, we only get called following an actual abort
*/
sas_end_task(cmd, task);
+ if (dev_is_sata(dev)) {
+ /* defer commands to libata so that libata EH can
+ * handle ata qcs correctly
+ */
+ list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
+ return;
+ }
+
/* now finish the command and move it on to the error
* handler done list, this also takes it off the
* error handler pending list.
scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
-static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
-{
- struct domain_device *dev = cmd_to_domain_dev(cmd);
- struct sas_ha_struct *ha = dev->port->ha;
- struct sas_task *task = TO_SAS_TASK(cmd);
-
- if (!dev_is_sata(dev)) {
- sas_eh_finish_cmd(cmd);
- return;
- }
-
- /* report the timeout to libata */
- sas_end_task(cmd, task);
- list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
-}
-
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
cmd->device->lun == my_cmd->device->lun)
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
}
}
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
- int res;
+ int res = TMF_RESP_FUNC_FAILED;
struct sas_task *task = TO_SAS_TASK(cmd);
struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i = to_sas_internal(host->transportt);
+ unsigned long flags;
if (!i->dft->lldd_abort_task)
return FAILED;
- res = i->dft->lldd_abort_task(task);
+ spin_lock_irqsave(host->host_lock, flags);
+ /* We cannot do async aborts for SATA devices */
+ if (dev_is_sata(dev) && !host->host_eh_scheduled) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return FAILED;
+ }
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ if (task)
+ res = i->dft->lldd_abort_task(task);
+ else
+ SAS_DPRINTK("no task to abort\n");
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
"recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
cmd = instance->cmd_list[i];
- cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
+ cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
GFP_KERNEL, &cmd->frame_phys_addr);
cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
return -ENOMEM;
}
- memset(cmd->frame, 0, instance->mfi_frame_size);
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
if ((instance->adapter_type == MFI_SERIES) && reset_devices)
instance->use_seqnum_jbod_fp = false;
}
+static void megasas_setup_reply_map(struct megasas_instance *instance)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < instance->msix_vectors; queue++) {
+ mask = pci_irq_get_affinity(instance->pdev, queue);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ instance->reply_map[cpu] = queue;
+ }
+ return;
+
+fallback:
+ for_each_possible_cpu(cpu)
+ instance->reply_map[cpu] = cpu % instance->msix_vectors;
+}
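Note the fallback differs from the hpsa version above: hpsa parks every CPU on queue 0, while megasas spreads CPUs round-robin with cpu % instance->msix_vectors. The submission-path consumer of this map is outside the hunk; a hypothetical sketch of how it would be used:

/* Hypothetical helper: pick the MSI-x reply queue for the submitting CPU. */
static inline u32 megasas_reply_queue_for_cpu(struct megasas_instance *instance)
{
	return instance->reply_map[raw_smp_processor_id()];
}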
+
/**
* megasas_init_fw - Initializes the FW
* @instance: Adapter soft state
goto fail_setup_irqs;
}
+ megasas_setup_reply_map(instance);
+
dev_info(&instance->pdev->dev,
"firmware supports msix\t: (%d)", fw_msix_count);
dev_info(&instance->pdev->dev,
*/
static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
{
+ instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
+ GFP_KERNEL);
+ if (!instance->reply_map)
+ return -ENOMEM;
+
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- return -ENOMEM;
+ goto fail;
break;
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- return -ENOMEM;
+ goto fail;
break;
}
return 0;
+ fail:
+ kfree(instance->reply_map);
+ instance->reply_map = NULL;
+ return -ENOMEM;
}
/*
*/
static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
{
+ kfree(instance->reply_map);
if (instance->adapter_type == MFI_SERIES) {
if (instance->producer)
pci_free_consistent(instance->pdev, sizeof(u32),
pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
scsi_host_put(host);
-
fail_alloc_instance:
pci_disable_device(pdev);
if (rval < 0)
goto fail_reenable_msix;
+ megasas_setup_reply_map(instance);
+
if (instance->adapter_type != MFI_SERIES) {
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
/**
* megasas_mgmt_poll - char node "poll" entry point
* */
-static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
poll_wait(file, &megasas_poll_wait, wait);
spin_lock_irqsave(&poll_aen_lock, flags);
if (megasas_poll_wait_aen)
- mask = (POLLIN | POLLRDNORM);
+ mask = (EPOLLIN | EPOLLRDNORM);
else
mask = 0;
megasas_poll_wait_aen = 0;
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
param_get_int, &mpt3sas_fwfault_debug, 0644);
+/**
+ * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
+ * in BAR0 space.
+ *
+ * @ioc: per adapter object
+ * @reply: reply message frame (lower 32 bit addr)
+ * @index: System request message index.
+ *
+ * Return: Nothing.
+ */
+static void
+_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
+ u32 index)
+{
+ /*
+ * 256 is the offset within the sys register.
+ * MPI frames start at offset 256; the max MPI frames supported is 32.
+ * 32 * 128 = 4K. From here, the clone of the reply free pool for the mCPU starts.
+ */
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+ void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
+ MPI_FRAME_START_OFFSET +
+ (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
+
+ writel(reply, reply_free_iomem);
+}
+
+/**
+ * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
+ * to system/BAR0 region.
+ *
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
+ * @src: Pointer to the Source data.
+ * @size: Size of data to be copied.
+ */
+static void
+_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
+{
+ int i;
+ u32 *src_virt_mem = (u32 *)src;
+
+ for (i = 0; i < size/4; i++)
+ writel((u32)src_virt_mem[i],
+ (void __iomem *)dst_iomem + (i * 4));
+}
+
+/**
+ * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
+ *
+ * @dst_iomem: Pointer to the destination location in BAR0 space.
+ * @src: Pointer to the Source data.
+ * @size: Size of data to be copied.
+ */
+static void
+_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
+{
+ int i;
+ u32 *src_virt_mem = (u32 *)(src);
+
+ for (i = 0; i < size/4; i++)
+ writel((u32)src_virt_mem[i],
+ (void __iomem *)dst_iomem + (i * 4));
+}
+
+/**
+ * _base_get_chain - Calculates and returns the virtual chain address
+ * for the provided smid in BAR0 space.
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @sge_chain_count: Scatter gather chain count.
+ *
+ * Return: chain address.
+ */
+static inline void __iomem*
+_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 sge_chain_count)
+{
+ void __iomem *base_chain, *chain_virt;
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+
+ base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
+ (cmd_credit * ioc->request_sz) +
+ REPLY_FREE_POOL_SIZE;
+ chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
+ ioc->request_sz) + (sge_chain_count * ioc->request_sz);
+ return chain_virt;
+}
+
+/**
+ * _base_get_chain_phys - Calculates and returns the physical address
+ * in BAR0 for scatter gather chains, for
+ * the provided smid.
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @sge_chain_count: Scatter gather chain count.
+ *
+ * Return: Physical chain address.
+ */
+static inline phys_addr_t
+_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 sge_chain_count)
+{
+ phys_addr_t base_chain_phys, chain_phys;
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+
+ base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
+ (cmd_credit * ioc->request_sz) +
+ REPLY_FREE_POOL_SIZE;
+ chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
+ ioc->request_sz) + (sge_chain_count * ioc->request_sz);
+ return chain_phys;
+}
+
+/**
+ * _base_get_buffer_bar0 - Calculates and returns the BAR0 mapped host
+ * buffer address for the provided smid.
+ * (Each smid gets a 64K buffer, starting at offset 17024.)
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return: Pointer to buffer location in BAR0.
+ */
+
+static void __iomem *
+_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+ /* Added an extra 1 to reach the end of the chain. */
+ void __iomem *chain_end = _base_get_chain(ioc,
+ cmd_credit + 1,
+ ioc->facts.MaxChainDepth);
+ return chain_end + (smid * 64 * 1024);
+}
+
+/**
+ * _base_get_buffer_phys_bar0 - Calculates and returns the BAR0 mapped
+ * host buffer physical address for the provided smid.
+ * (Each smid gets a 64K buffer, starting at offset 17024.)
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return: Physical address of the buffer location in BAR0.
+ */
+static phys_addr_t
+_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ u16 cmd_credit = ioc->facts.RequestCredit + 1;
+ phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
+ cmd_credit + 1,
+ ioc->facts.MaxChainDepth);
+ return chain_end_phys + (smid * 64 * 1024);
+}
+
+/**
+ * _base_get_chain_buffer_dma_to_chain_buffer - Iterates over the chain
+ * lookup list and returns the chain_buffer
+ * address for the matching dma address.
+ *
+ * @ioc: per adapter object
+ * @chain_buffer_dma: Chain buffer dma address.
+ *
+ * Return: Pointer to chain buffer, or NULL on failure.
+ */
+static void *
+_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
+ dma_addr_t chain_buffer_dma)
+{
+ u16 index;
+
+ for (index = 0; index < ioc->chain_depth; index++) {
+ if (ioc->chain_lookup[index].chain_buffer_dma ==
+ chain_buffer_dma)
+ return ioc->chain_lookup[index].chain_buffer;
+ }
+ pr_info(MPT3SAS_FMT
+ "Provided chain_buffer_dma address is not in the lookup list\n",
+ ioc->name);
+ return NULL;
+}
+
+/**
+ * _clone_sg_entries - MPI EP's scsiio and config requests
+ * are handled here. This is the base function for
+ * double buffering, before submitting
+ * the requests.
+ *
+ * @ioc: per adapter object.
+ * @mpi_request: mf request pointer.
+ * @smid: system request message index.
+ *
+ * Return: Nothing.
+ */
+static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
+ void *mpi_request, u16 smid)
+{
+ Mpi2SGESimple32_t *sgel, *sgel_next;
+ u32 sgl_flags, sge_chain_count = 0;
+ bool is_write = 0;
+ u16 i = 0;
+ void __iomem *buffer_iomem;
+ phys_addr_t buffer_iomem_phys;
+ void __iomem *buff_ptr;
+ phys_addr_t buff_ptr_phys;
+ void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
+ void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
+ phys_addr_t dst_addr_phys;
+ MPI2RequestHeader_t *request_hdr;
+ struct scsi_cmnd *scmd;
+ struct scatterlist *sg_scmd = NULL;
+ int is_scsiio_req = 0;
+
+ request_hdr = (MPI2RequestHeader_t *) mpi_request;
+
+ if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
+ Mpi25SCSIIORequest_t *scsiio_request =
+ (Mpi25SCSIIORequest_t *)mpi_request;
+ sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
+ is_scsiio_req = 1;
+ } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
+ Mpi2ConfigRequest_t *config_req =
+ (Mpi2ConfigRequest_t *)mpi_request;
+ sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
+ } else
+ return;
+
+ /* From the smid we can get the scsi_cmd; once we have sg_scmd,
+ * we just need sg_virt and sg_next to get the virtual
+ * address associated with sgel->Address.
+ */
+
+ if (is_scsiio_req) {
+ /* Get scsi_cmd using smid */
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (scmd == NULL) {
+ pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
+ return;
+ }
+
+ /* Get sg_scmd from scmd provided */
+ sg_scmd = scsi_sglist(scmd);
+ }
+
+ /*
+ * 0 - 255 System register
+ * 256 - 4352 MPI Frame. (This is based on maxCredit 32)
+ * 4352 - 4864 Reply_free pool (512 byte is reserved
+ * considering maxCredit 32. Reply need extra
+ * room, for mCPU case kept four times of
+ * maxCredit).
+ * 4864 - 17152 SGE chain element. (32cmd * 3 chain of
+ * 128 byte size = 12288)
+ * 17152 - x Host buffer mapped with smid.
+ * (Each smid can have 64K Max IO.)
+ * BAR0+Last 1K MSIX Addr and Data
+ * Total size in use 2113664 bytes of 4MB BAR0
+ */
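+ /*
+ * Worked example with the maxCredit 32 and 128 byte request frames
+ * assumed above: the reply free pool starts at 256 + 32 * 128 = 4352,
+ * the SGE chain region at 4352 + 512 = 4864, and the per-smid host
+ * buffers at 4864 + 32 * 3 * 128 = 17152.
+ */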
+
+ buffer_iomem = _base_get_buffer_bar0(ioc, smid);
+ buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
+
+ buff_ptr = buffer_iomem;
+ buff_ptr_phys = buffer_iomem_phys;
+ WARN_ON(buff_ptr_phys > U32_MAX);
+
+ if (sgel->FlagsLength &
+ (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
+ is_write = 1;
+
+ for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
+
+ sgl_flags = (sgel->FlagsLength >> MPI2_SGE_FLAGS_SHIFT);
+
+ switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
+ case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
+ /*
+ * Helper function which on passing
+ * chain_buffer_dma returns chain_buffer. Get
+ * the virtual address for sgel->Address
+ */
+ sgel_next =
+ _base_get_chain_buffer_dma_to_chain_buffer(ioc,
+ sgel->Address);
+ if (sgel_next == NULL)
+ return;
+ /*
+ * This copies a 128 byte chain
+ * frame (not a host buffer)
+ */
+ dst_chain_addr[sge_chain_count] =
+ _base_get_chain(ioc,
+ smid, sge_chain_count);
+ src_chain_addr[sge_chain_count] =
+ (void *) sgel_next;
+ dst_addr_phys = _base_get_chain_phys(ioc,
+ smid, sge_chain_count);
+ WARN_ON(dst_addr_phys > U32_MAX);
+ sgel->Address = (u32)dst_addr_phys;
+ sgel = sgel_next;
+ sge_chain_count++;
+ break;
+ case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
+ if (is_write) {
+ if (is_scsiio_req) {
+ _base_clone_to_sys_mem(buff_ptr,
+ sg_virt(sg_scmd),
+ (sgel->FlagsLength & 0x00ffffff));
+ /*
+ * FIXME: this relies on a zero
+ * PCI mem_offset.
+ */
+ sgel->Address = (u32)buff_ptr_phys;
+ } else {
+ _base_clone_to_sys_mem(buff_ptr,
+ ioc->config_vaddr,
+ (sgel->FlagsLength & 0x00ffffff));
+ sgel->Address = (u32)buff_ptr_phys;
+ }
+ }
+ buff_ptr += (sgel->FlagsLength & 0x00ffffff);
+ buff_ptr_phys += (sgel->FlagsLength & 0x00ffffff);
+ if ((sgel->FlagsLength &
+ (MPI2_SGE_FLAGS_END_OF_BUFFER
+ << MPI2_SGE_FLAGS_SHIFT)))
+ goto eob_clone_chain;
+ else {
+ /*
+ * Every single element in MPT will have an
+ * associated sg_next. Better to sanity-check that
+ * sg_next is not NULL; it would be a bug
+ * if it were.
+ */
+ if (is_scsiio_req) {
+ sg_scmd = sg_next(sg_scmd);
+ if (sg_scmd)
+ sgel++;
+ else
+ goto eob_clone_chain;
+ }
+ }
+ break;
+ }
+ }
+
+eob_clone_chain:
+ for (i = 0; i < sge_chain_count; i++) {
+ if (is_scsiio_req)
+ _base_clone_to_sys_mem(dst_chain_addr[i],
+ src_chain_addr[i], ioc->request_sz);
+ }
+}
+
/**
* mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
* @arg: input argument, used to derive ioc
ack_request->EventContext = mpi_reply->EventContext;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
out:
0 : ioc->reply_free_host_index + 1;
ioc->reply_free[ioc->reply_free_host_index] =
cpu_to_le32(reply);
+ if (ioc->is_mcpu_endpoint)
+ _base_clone_reply_to_sys_mem(ioc,
+ cpu_to_le32(reply),
+ ioc->reply_free_host_index);
writel(ioc->reply_free_host_index,
&ioc->chip->ReplyFreeHostIndex);
}
struct sysinfo s;
u64 consistent_dma_mask;
+ if (ioc->is_mcpu_endpoint)
+ goto try_32bit;
+
if (ioc->dma_mask)
consistent_dma_mask = DMA_BIT_MASK(64);
else
}
}
+ try_32bit:
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
ioc->base_add_sg_single = &_base_add_sg_single_32;
continue;
}
- for_each_cpu(cpu, mask)
+ for_each_cpu_and(cpu, mask, cpu_online_mask) {
+ if (cpu >= ioc->cpu_msix_table_sz)
+ break;
ioc->cpu_msix_table[cpu] = reply_q->msix_index;
+ }
}
return;
}
u32 pio_sz;
int i, r = 0;
u64 pio_chip = 0;
- u64 chip_phys = 0;
+ phys_addr_t chip_phys = 0;
struct adapter_reply_queue *reply_q;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
if (memap_sz)
continue;
ioc->chip_phys = pci_resource_start(pdev, i);
- chip_phys = (u64)ioc->chip_phys;
+ chip_phys = ioc->chip_phys;
memap_sz = pci_resource_len(pdev, i);
ioc->chip = ioremap(ioc->chip_phys, memap_sz);
}
"IO-APIC enabled"),
pci_irq_vector(ioc->pdev, reply_q->msix_index));
- pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
- ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+ pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
+ ioc->name, &chip_phys, ioc->chip, memap_sz);
pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
ioc->name, (unsigned long long)pio_chip, pio_sz);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
+/**
+ * _base_mpi_ep_writeq - 32 bit write to MMIO
+ * @b: data payload
+ * @addr: address in MMIO space
+ * @writeq_lock: spin lock
+ *
+ * This is special handling for the MPI EP to take care of 32 bit
+ * environments where it is not guaranteed that the entire word
+ * is sent in one transfer.
+ */
+static inline void
+_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
+ spinlock_t *writeq_lock)
+{
+ unsigned long flags;
+ __u64 data_out = cpu_to_le64(b);
+
+ spin_lock_irqsave(writeq_lock, flags);
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+ spin_unlock_irqrestore(writeq_lock, flags);
+}
+
/**
* _base_writeq - 64 bit write to MMIO
* @ioc: per adapter object
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
- unsigned long flags;
- __u64 data_out = cpu_to_le64(b);
-
- spin_lock_irqsave(writeq_lock, flags);
- writel((u32)(data_out), addr);
- writel((u32)(data_out >> 32), (addr + 4));
- spin_unlock_irqrestore(writeq_lock, flags);
+ _base_mpi_ep_writeq(b, addr, writeq_lock);
}
#endif
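For contrast, on builds where writeq is available the corresponding _base_writeq (outside this hunk) posts the descriptor with a single atomic 64 bit MMIO write and never takes the lock. A sketch under that assumption:

#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	writeq(cpu_to_le64(b), addr);	/* one 8-byte write, no lock needed */
}
#endif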
+/**
+ * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+ void *mpi_req_iomem;
+ __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
+
+ _clone_sg_entries(ioc, (void *) mfp, smid);
+ mpi_req_iomem = (void *)ioc->chip +
+ MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
+ _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
+ ioc->request_sz);
+ descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
/**
* _base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
}
/**
- * _base_put_smid_fast_path - send fast path request to firmware
+ * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle
*
* Return nothing.
*/
-static void
-_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+void
+mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle)
{
Mpi2RequestDescriptorUnion_t descriptor;
}
/**
- * _base_put_smid_hi_priority - send Task Management request to firmware
+ * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
* @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0.
* Return nothing.
*/
-static void
-_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+void
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
- u64 *request = (u64 *)&descriptor;
+ void *mpi_req_iomem;
+ u64 *request;
+
+ if (ioc->is_mcpu_endpoint) {
+ MPI2RequestHeader_t *request_hdr;
+
+ __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
+
+ request_hdr = (MPI2RequestHeader_t *)mfp;
+ /* TBD: 256 is the offset within the sys register. */
+ mpi_req_iomem = (void *)ioc->chip + MPI_FRAME_START_OFFSET
+ + (smid * ioc->request_sz);
+ _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
+ ioc->request_sz);
+ }
+
+ request = (u64 *)&descriptor;
descriptor.HighPriority.RequestFlags =
MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
descriptor.HighPriority.SMID = cpu_to_le16(smid);
descriptor.HighPriority.LMID = 0;
descriptor.HighPriority.Reserved1 = 0;
- _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
- &ioc->scsi_lookup_lock);
+ if (ioc->is_mcpu_endpoint)
+ _base_mpi_ep_writeq(*request,
+ &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+ else
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
}
/**
- * _base_put_smid_nvme_encap - send NVMe encapsulated request to
+ * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
* firmware
* @ioc: per adapter object
* @smid: system request message index
*
* Return nothing.
*/
-static void
-_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+void
+mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
}
/**
- * _base_put_smid_default - Default, primarily used for config pages
+ * mpt3sas_base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
*
* Return nothing.
*/
-static void
-_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+void
+mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
Mpi2RequestDescriptorUnion_t descriptor;
- u64 *request = (u64 *)&descriptor;
+ void *mpi_req_iomem;
+ u64 *request;
+ MPI2RequestHeader_t *request_hdr;
+
+ if (ioc->is_mcpu_endpoint) {
+ __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
+ request_hdr = (MPI2RequestHeader_t *)mfp;
+
+ _clone_sg_entries(ioc, (void *) mfp, smid);
+ /* TBD: 256 is the offset within the sys register */
+ mpi_req_iomem = (void *)ioc->chip +
+ MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
+ _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
+ ioc->request_sz);
+ }
+ request = (u64 *)&descriptor;
descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
descriptor.Default.SMID = cpu_to_le16(smid);
descriptor.Default.LMID = 0;
descriptor.Default.DescriptorTypeDependent = 0;
- _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
- &ioc->scsi_lookup_lock);
-}
-
-/**
-* _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
-* Atomic Request Descriptor
-* @ioc: per adapter object
-* @smid: system request message index
-* @handle: device handle, unused in this function, for function type match
-*
-* Return nothing.
-*/
-static void
-_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle)
-{
- Mpi26AtomicRequestDescriptor_t descriptor;
- u32 *request = (u32 *)&descriptor;
-
- descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
- descriptor.MSIxIndex = _base_get_msix_index(ioc);
- descriptor.SMID = cpu_to_le16(smid);
-
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
-}
-
-/**
- * _base_put_smid_fast_path_atomic - send fast path request to firmware
- * using Atomic Request Descriptor
- * @ioc: per adapter object
- * @smid: system request message index
- * @handle: device handle, unused in this function, for function type match
- * Return nothing
- */
-static void
-_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 handle)
-{
- Mpi26AtomicRequestDescriptor_t descriptor;
- u32 *request = (u32 *)&descriptor;
-
- descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
- descriptor.MSIxIndex = _base_get_msix_index(ioc);
- descriptor.SMID = cpu_to_le16(smid);
-
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
-}
-
-/**
- * _base_put_smid_hi_priority_atomic - send Task Management request to
- * firmware using Atomic Request Descriptor
- * @ioc: per adapter object
- * @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0
- *
- * Return nothing.
- */
-static void
-_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
- u16 msix_task)
-{
- Mpi26AtomicRequestDescriptor_t descriptor;
- u32 *request = (u32 *)&descriptor;
-
- descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.MSIxIndex = msix_task;
- descriptor.SMID = cpu_to_le16(smid);
-
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
-}
-
-/**
- * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to
- * firmware using Atomic Request Descriptor
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Return nothing.
- */
-static void
-_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- Mpi26AtomicRequestDescriptor_t descriptor;
- u32 *request = (u32 *)&descriptor;
-
- descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
- descriptor.MSIxIndex = _base_get_msix_index(ioc);
- descriptor.SMID = cpu_to_le16(smid);
-
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
-}
-
-/**
- * _base_put_smid_default - Default, primarily used for config pages
- * use Atomic Request Descriptor
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Return nothing.
- */
-static void
-_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- Mpi26AtomicRequestDescriptor_t descriptor;
- u32 *request = (u32 *)&descriptor;
-
- descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
- descriptor.MSIxIndex = _base_get_msix_index(ioc);
- descriptor.SMID = cpu_to_le16(smid);
-
- writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+ if (ioc->is_mcpu_endpoint)
+ _base_mpi_ep_writeq(*request,
+ &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+ else
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
}
/**
sg_tablesize = min_t(unsigned short, sg_tablesize,
MPT_KDUMP_MIN_PHYS_SEGMENTS);
- if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
- sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
- else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
- sg_tablesize = min_t(unsigned short, sg_tablesize,
- SG_MAX_SEGMENTS);
- pr_warn(MPT3SAS_FMT
- "sg_tablesize(%u) is bigger than kernel"
- " defined SG_CHUNK_SIZE(%u)\n", ioc->name,
- sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+ if (ioc->is_mcpu_endpoint)
+ ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
+ else {
+ if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
+ sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
+ else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
+ sg_tablesize = min_t(unsigned short, sg_tablesize,
+ SG_MAX_SEGMENTS);
+ pr_warn(MPT3SAS_FMT
+ "sg_tablesize(%u) is bigger than kernel "
+ "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
+ sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+ }
+ ioc->shost->sg_tablesize = sg_tablesize;
}
- ioc->shost->sg_tablesize = sg_tablesize;
ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
(facts->RequestCredit / 4));
/* reply free queue sizing - taking into account for 64 FW events */
ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
- /* calculate reply descriptor post queue depth */
- ioc->reply_post_queue_depth = ioc->hba_queue_depth +
- ioc->reply_free_queue_depth + 1 ;
- /* align the reply post queue on the next 16 count boundary */
- if (ioc->reply_post_queue_depth % 16)
- ioc->reply_post_queue_depth += 16 -
- (ioc->reply_post_queue_depth % 16);
+ /* mCPU manages single counters for simplicity */
+ if (ioc->is_mcpu_endpoint)
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
+ else {
+ /* calculate reply descriptor post queue depth */
+ ioc->reply_post_queue_depth = ioc->hba_queue_depth +
+ ioc->reply_free_queue_depth + 1;
+ /* align the reply post queue on the next 16 count boundary */
+ if (ioc->reply_post_queue_depth % 16)
+ ioc->reply_post_queue_depth += 16 -
+ (ioc->reply_post_queue_depth % 16);
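+ /*
+ * Example: a hba_queue_depth of 1000 gives a reply_free_queue_depth
+ * of 1064, so 1000 + 1064 + 1 = 2065, rounded up here to 2080 (the
+ * next multiple of 16).
+ */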
+ }
if (ioc->reply_post_queue_depth >
facts->MaxReplyDescriptorPostQueueDepth) {
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
init_completion(&ioc->base_cmds.done);
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
init_completion(&ioc->base_cmds.done);
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
if ((facts->IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
ioc->rdpq_array_capable = 1;
- if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
- ioc->atomic_desc_capable = 1;
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
init_completion(&ioc->port_enable_cmds.done);
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
return 0;
}
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
init_completion(&ioc->base_cmds.done);
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
/* initialize Reply Free Queue */
for (i = 0, reply_address = (u32)ioc->reply_dma ;
i < ioc->reply_free_queue_depth ; i++, reply_address +=
- ioc->reply_sz)
+ ioc->reply_sz) {
ioc->reply_free[i] = cpu_to_le32(reply_address);
+ if (ioc->is_mcpu_endpoint)
+ _base_clone_reply_to_sys_mem(ioc,
+ (__le32)reply_address, i);
+ }
/* initialize reply queues */
if (ioc->is_driver_loading)
break;
}
- if (ioc->atomic_desc_capable) {
- ioc->put_smid_default = &_base_put_smid_default_atomic;
- ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
- ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
- ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
- ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
- } else {
- ioc->put_smid_default = &_base_put_smid_default;
+ if (ioc->is_mcpu_endpoint)
+ ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
+ else
ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
- ioc->put_smid_fast_path = &_base_put_smid_fast_path;
- ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
- ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
- }
-
/*
* These function pointers for other requests that don't
}
/**
- * _wait_for_commands_to_complete - reset controller
* mpt3sas_wait_for_commands_to_complete - wait for pending commands to complete
* @ioc: Pointer to MPT_ADAPTER structure
*
* This function waits 10s for all pending commands to complete
* prior to putting the controller in reset.
*/
-static void
-_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
is_fault = 1;
}
_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
- _wait_for_commands_to_complete(ioc);
+ mpt3sas_wait_for_commands_to_complete(ioc);
_base_mask_interrupts(ioc);
r = _base_make_ioc_ready(ioc, type);
if (r)
#define MPT_MIN_PHYS_SEGMENTS 16
#define MPT_KDUMP_MIN_PHYS_SEGMENTS 32
+#define MCPU_MAX_CHAINS_PER_IO 3
+
#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
#else
#define MPT3SAS_NVME_QUEUE_DEPTH 128
#define MPT_NAME_LENGTH 32 /* generic length of strings */
#define MPT_STRING_LENGTH 64
+#define MPI_FRAME_START_OFFSET 256
+#define REPLY_FREE_POOL_SIZE 512 /* (32 maxcredit * 4) * (4 times) */
#define MPT_MAX_CALLBACKS 32
char tmp_string[MPT_STRING_LENGTH];
struct pci_dev *pdev;
Mpi2SystemInterfaceRegs_t __iomem *chip;
- resource_size_t chip_phys;
+ phys_addr_t chip_phys;
int logging_level;
int fwfault_debug;
u8 ir_firmware;
u16 config_page_sz;
void *config_page;
dma_addr_t config_page_dma;
+ void *config_vaddr;
/* scsiio request */
u16 hba_queue_depth;
u32 ring_buffer_offset;
u32 ring_buffer_sz;
u8 is_warpdrive;
+ u8 is_mcpu_endpoint;
u8 hide_ir_msg;
u8 mfg_pg10_hide_flag;
u8 hide_drives;
void *device_remove_in_progress;
u16 device_remove_in_progress_sz;
u8 is_gen35_ioc;
- u8 atomic_desc_capable;
PUT_SMID_IO_FP_HIP put_smid_scsi_io;
- PUT_SMID_IO_FP_HIP put_smid_fast_path;
- PUT_SMID_IO_FP_HIP put_smid_hi_priority;
- PUT_SMID_DEFAULT put_smid_default;
- PUT_SMID_DEFAULT put_smid_nvme_encap;
};
dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task);
+void mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
+void
+mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
+
/* scsih shared API */
struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- ioc->put_smid_hi_priority(ioc, smid, msix_task);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
"device been deleted! scmd(%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
"device been deleted! scmd(%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
- if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ ioc->remove_host) {
starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
scmd);
scmd->result = DID_NO_CONNECT << 16;
ioc->name, scmd);
scsi_print_command(scmd);
- if (ioc->is_driver_loading) {
+ if (ioc->is_driver_loading || ioc->remove_host) {
pr_info(MPT3SAS_FMT "Blocking the host reset\n",
ioc->name);
r = FAILED;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
set_bit(handle, ioc->device_remove_in_progress);
- ioc->put_smid_hi_priority(ioc, smid, 0);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = mpi_request_tm->DevHandle;
- ioc->put_smid_default(ioc, smid_sas_ctrl);
+ mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
return _scsih_check_for_pending_tm(ioc, smid);
}
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- ioc->put_smid_hi_priority(ioc, smid, 0);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
}
/**
ack_request->EventContext = event_context;
ack_request->VF_ID = 0; /* TODO */
ack_request->VP_ID = 0;
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
}
/**
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
mpi_request->DevHandle = handle;
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
}
/**
st = scsi_cmd_priv(scmd);
mpt3sas_base_clear_st(ioc, st);
scsi_dma_unmap(scmd);
- if (ioc->pci_error_recovery)
+ if (ioc->pci_error_recovery || ioc->remove_host)
scmd->result = DID_NO_CONNECT << 16;
else
scmd->result = DID_RESET << 16;
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
MPI25_SCSIIO_IOFLAGS_FAST_PATH);
- ioc->put_smid_fast_path(ioc, smid, handle);
+ mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
} else
ioc->put_smid_scsi_io(ioc, smid,
le16_to_cpu(mpi_request->DevHandle));
} else
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
return 0;
out:
handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
if (!ioc->hide_ir_msg)
pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
init_completion(&ioc->scsih_cmds.done);
- ioc->put_smid_default(ioc, smid);
+ mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
unsigned long flags;
ioc->remove_host = 1;
+
+ mpt3sas_wait_for_commands_to_complete(ioc);
+ _scsih_flush_running_cmds(ioc);
+
_scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
unsigned long flags;
ioc->remove_host = 1;
+
+ mpt3sas_wait_for_commands_to_complete(ioc);
+ _scsih_flush_running_cmds(ioc);
+
_scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
case MPI2_MFGPAGE_DEVID_SAS2308_1:
case MPI2_MFGPAGE_DEVID_SAS2308_2:
case MPI2_MFGPAGE_DEVID_SAS2308_3:
+ case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
return MPI2_VERSION;
case MPI25_MFGPAGE_DEVID_SAS3004:
case MPI25_MFGPAGE_DEVID_SAS3008:
ioc->hba_mpi_version_belonged = hba_mpi_version;
ioc->id = mpt2_ids++;
sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
- if (pdev->device == MPI2_MFGPAGE_DEVID_SSS6200) {
+ switch (pdev->device) {
+ case MPI2_MFGPAGE_DEVID_SSS6200:
ioc->is_warpdrive = 1;
ioc->hide_ir_msg = 1;
- } else
+ break;
+ case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
+ ioc->is_mcpu_endpoint = 1;
+ break;
+ default:
ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
+ break;
+ }
break;
case MPI25_VERSION:
case MPI26_VERSION:
shost->transportt = mpt3sas_transport_template;
shost->unique_id = ioc->id;
- if (max_sectors != 0xFFFF) {
- if (max_sectors < 64) {
- shost->max_sectors = 64;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767. Assigning "
- "value of 64.\n", ioc->name, max_sectors);
- } else if (max_sectors > 32767) {
- shost->max_sectors = 32767;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767. Assigning "
- "default value of 32767.\n", ioc->name,
- max_sectors);
- } else {
- shost->max_sectors = max_sectors & 0xFFFE;
- pr_info(MPT3SAS_FMT
+ if (ioc->is_mcpu_endpoint) {
+ /* mCPU MPI supports 64K max IO */
+ shost->max_sectors = 128;
+ pr_info(MPT3SAS_FMT
"The max_sectors value is set to %d\n",
ioc->name, shost->max_sectors);
+ } else {
+ if (max_sectors != 0xFFFF) {
+ if (max_sectors < 64) {
+ shost->max_sectors = 64;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. " \
+ "Assigning value of 64.\n", \
+ ioc->name, max_sectors);
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767." \
+ "Assigning default value of 32767.\n", \
+ ioc->name, max_sectors);
+ } else {
+ shost->max_sectors = max_sectors & 0xFFFE;
+ pr_info(MPT3SAS_FMT
+ "The max_sectors value is set to %d\n",
+ ioc->name, shost->max_sectors);
+ }
}
}
-
/* register EEDP capabilities with SCSI layer */
if (prot_mask > 0)
scsi_host_set_prot(shost, prot_mask);
snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
"fw_event_%s%d", ioc->driver_name, ioc->id);
ioc->firmware_event_thread = alloc_ordered_workqueue(
- ioc->firmware_event_name, WQ_MEM_RECLAIM);
+ ioc->firmware_event_name, 0);
if (!ioc->firmware_event_thread) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP,
+ PCI_ANY_ID, PCI_ANY_ID },
/* SSS6200 */
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
PCI_ANY_ID, PCI_ANY_ID },
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct iscsi_task_context *task_ctx;
+ struct e4_iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct iscsi_task_context *task_ctx;
+ struct e4_iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
(qedi->bdq_prod_idx % qedi->rq_num_entries));
/* Obtain buffer address from rqe_opaque */
- idx = cqe->rqe_opaque.lo;
+ idx = cqe->rqe_opaque;
if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
}
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
- "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
- cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+ "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
struct scsi_bd *pbl;
/* Obtain buffer address from rqe_opaque */
- idx = cqe->rqe_opaque.lo;
+ idx = cqe->rqe_opaque;
if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, idx);
- pbl->opaque.hi = 0;
- pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+ pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+ pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
/* Increment producer to let f/w know we've handled the frame */
qedi->bdq_prod_idx += count;
iscsi_cid = cqe->conn_id;
qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+ if (!qedi_conn) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "icid not found 0x%x\n", cqe->conn_id);
+ return;
+ }
/* Based on this itt get the corresponding qedi_cmd */
spin_lock_bh(&qedi_conn->tmf_work_lock);
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL;
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd;
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL;
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
qedi_cmd->task_id = tid;
struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params;
- struct iscsi_task_context *fw_task_ctx;
+ struct e4_iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
return -ENOMEM;
fw_task_ctx =
- (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
- memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+ (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ tid);
+ memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
cmd->task_id = tid;
{
struct qedi_ctx *qedi;
struct qedi_endpoint *qedi_ep;
- struct async_data *data;
+ struct iscsi_eqe_data *data;
int rval = 0;
if (!context || !fw_handle) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
- data = (struct async_data *)fw_handle;
+ data = (struct iscsi_eqe_data *)fw_handle;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
- "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
- data->cid, data->itid, data->error_code,
- data->fw_debug_param);
+ "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
+ data->icid, data->conn_id, data->error_code,
+ data->error_pdu_opcode_reserved);
- qedi_ep = qedi->ep_tbl[data->cid];
+ qedi_ep = qedi->ep_tbl[data->icid];
if (!qedi_ep) {
QEDI_WARN(&qedi->dbg_ctx,
"Cannot process event, ep already disconnected, cid=0x%x\n",
- data->cid);
+ data->icid);
WARN_ON(1);
return -ENODEV;
}
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(struct status_block), &sb_phys,
+ sizeof(struct status_block_e4), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
- qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
err_alloc_mem:
return rval;
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block *sb = sb_info->sb_virt;
+ struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, i);
- pbl->opaque.hi = 0;
- pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+ pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+ pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+ pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
pbl++;
}
{
struct qedi_ctx *qedi = data;
struct nvm_iscsi_initiator *initiator;
- char *str = buf;
int rc = 1;
u32 ipv6_en, dhcp_en, ip_len;
struct nvm_iscsi_block *block;
switch (type) {
case ISCSI_BOOT_ETH_IP_ADDR:
- rc = snprintf(str, ip_len, fmt, ip);
+ rc = snprintf(buf, ip_len, fmt, ip);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- rc = snprintf(str, ip_len, fmt, sub);
+ rc = snprintf(buf, ip_len, fmt, sub);
break;
case ISCSI_BOOT_ETH_GATEWAY:
- rc = snprintf(str, ip_len, fmt, gw);
+ rc = snprintf(buf, ip_len, fmt, gw);
break;
case ISCSI_BOOT_ETH_FLAGS:
- rc = snprintf(str, 3, "%hhd\n",
+ rc = snprintf(buf, 3, "%hhd\n",
SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
- rc = snprintf(str, 3, "0\n");
+ rc = snprintf(buf, 3, "0\n");
break;
case ISCSI_BOOT_ETH_MAC:
- rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
+ rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
break;
case ISCSI_BOOT_ETH_VLAN:
- rc = snprintf(str, 12, "%d\n",
+ rc = snprintf(buf, 12, "%d\n",
GET_FIELD2(initiator->generic_cont0,
NVM_ISCSI_CFG_INITIATOR_VLAN));
break;
case ISCSI_BOOT_ETH_ORIGIN:
if (dhcp_en)
- rc = snprintf(str, 3, "3\n");
+ rc = snprintf(buf, 3, "3\n");
break;
default:
rc = 0;
{
struct qedi_ctx *qedi = data;
struct nvm_iscsi_initiator *initiator;
- char *str = buf;
int rc;
struct nvm_iscsi_block *block;
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
- initiator->initiator_name.byte);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+ initiator->initiator_name.byte);
break;
default:
rc = 0;
qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
char *buf, enum qedi_nvm_tgts idx)
{
- char *str = buf;
int rc = 1;
u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
struct nvm_iscsi_block *block;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
- block->target[idx].target_name.byte);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+ block->target[idx].target_name.byte);
break;
case ISCSI_BOOT_TGT_IP_ADDR:
if (ipv6_en)
- rc = snprintf(str, ip_len, "%pI6\n",
+ rc = snprintf(buf, ip_len, "%pI6\n",
block->target[idx].ipv6_addr.byte);
else
- rc = snprintf(str, ip_len, "%pI4\n",
+ rc = snprintf(buf, ip_len, "%pI4\n",
block->target[idx].ipv4_addr.byte);
break;
case ISCSI_BOOT_TGT_PORT:
- rc = snprintf(str, 12, "%d\n",
+ rc = snprintf(buf, 12, "%d\n",
GET_FIELD2(block->target[idx].generic_cont0,
NVM_ISCSI_CFG_TARGET_TCP_PORT));
break;
case ISCSI_BOOT_TGT_LUN:
- rc = snprintf(str, 22, "%.*d\n",
+ rc = snprintf(buf, 22, "%.*d\n",
block->target[idx].lun.value[1],
block->target[idx].lun.value[0]);
break;
case ISCSI_BOOT_TGT_CHAP_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
- chap_name);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
- chap_secret);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
- mchap_name);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
- mchap_secret);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
- rc = snprintf(str, 3, "0\n");
+ rc = snprintf(buf, 3, "0\n");
break;
default:
rc = 0;
}
#ifdef CONFIG_DEBUG_FS
- qedi_dbg_host_init(&qedi->dbg_ctx, &qedi_debugfs_ops,
- &qedi_dbg_fops);
+ qedi_dbg_host_init(&qedi->dbg_ctx, qedi_debugfs_ops,
+ qedi_dbg_fops);
#endif
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
* | Module Init and Probe | 0x0193 | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e |
- * | Mailbox commands | 0x1205 | 0x11a2-0x11ff |
+ * | Mailbox commands | 0x1206 | 0x11a2-0x11ff |
* | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc010 | |
- * | Misc | 0xd302 | 0xd031-0xd0ff |
+ * | Misc | 0xd303 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
* | Target Mode | 0xe081 | |
/**
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
- * @ha: HA context
+ * @vha: HA context
* @hardware_locked: Called with the hardware_lock
*/
void
/**
* qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
- * @ha: HA context
+ * @vha: HA context
* @hardware_locked: Called with the hardware_lock
*/
void
struct name_list_extended {
struct get_name_list_extended *l;
dma_addr_t ldma;
- struct list_head fcports; /* protect by sess_list */
+ struct list_head fcports;
+ spinlock_t fcports_lock;
u32 size;
- u8 sent;
};
/*
* Timeout timer counts in seconds
#define NVME_PRLI_SP_DISCOVERY BIT_3
uint8_t nvme_flag;
#define NVME_FLAG_REGISTERED 4
+#define NVME_FLAG_DELETING 2
+#define NVME_FLAG_RESETTING 1
struct fc_port *conflict;
unsigned char logout_completed;
SF_QUEUED = BIT_1,
};
+enum fc4type_t {
+ FS_FC4TYPE_FCP = BIT_0,
+ FS_FC4TYPE_NVME = BIT_1,
+};
+
struct fab_scan_rp {
port_id_t id;
+ enum fc4type_t fc4type;
u8 port_name[8];
u8 node_name[8];
};
} nack;
struct {
u8 fc4_type;
+ srb_t *sp;
} gpnft;
} u;
};
struct work_struct q_work;
struct list_head qp_list_elem; /* vha->qp_list */
struct list_head hints_list;
- struct list_head nvme_done_list;
uint16_t cpuid;
struct qla_tgt_counters tgt_counters;
};
struct nvme_fc_local_port *nvme_local_port;
struct completion nvme_del_done;
struct list_head nvme_rport_list;
- atomic_t nvme_active_aen_cnt;
- uint16_t nvme_last_rptd_aen;
uint16_t fcoe_vlan_id;
uint16_t fcoe_fcf_idx;
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
- * @ha: HA context
- * @req_size: request size in bytes
- * @rsp_size: response size in bytes
+ * @vha: HA context
+ * @arg: CT arguments
*
- * Returns a pointer to the @ha's ms_iocb.
+ * Returns a pointer to the @vha's ms_iocb.
*/
void *
qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
/**
* qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
- * @ha: HA context
- * @req_size: request size in bytes
- * @rsp_size: response size in bytes
+ * @vha: HA context
+ * @arg: CT arguments
*
 * Returns a pointer to the @vha's ms_iocb.
*/
/**
* qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
- * @ct_req: CT request buffer
+ * @p: CT request buffer
* @cmd: GS command
* @rsp_size: response size in bytes
*
/**
* qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
- * @ha: HA context
+ * @vha: HA context
 * @fcport: fcport entry to be updated
*
* Returns 0 on success.
/**
* qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* NOTE: Non-Nx_Ports are not requested.
/**
* qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* Returns 0 on success.
/**
* qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* Returns 0 on success.
/**
* qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
- * @ha: HA context
+ * @vha: HA context
+ * @type: not used
*
* Returns 0 on success.
*/
/**
* qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
- * @ha: HA context
+ * @vha: HA context
* @cmd: GS command
* @scmd_len: Subcommand length
* @data_size: response size in bytes
/**
* qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
- * @ha: HA context
+ * @vha: HA context
 * @fcport: fcport entry to be updated
*
 * This command uses the old Execute SNS Command mailbox routine.
/**
* qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
 * This command uses the old Execute SNS Command mailbox routine.
/**
* qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
 * This command uses the old Execute SNS Command mailbox routine.
/**
* qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
 * This command uses the old Execute SNS Command mailbox routine.
/**
* qla2x00_snd_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
- * @ha: HA context
+ * @vha: HA context
*
 * This command uses the old Execute SNS Command mailbox routine.
*
/**
* qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
- * HBA.
- * @ha: HA context
+ * @vha: HA context
*
 * This command uses the old Execute SNS Command mailbox routine.
*
/**
* qla2x00_mgmt_svr_login() - Login to fabric Management Service.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
- * @ha: HA context
+ * @vha: HA context
* @req_size: request size in bytes
* @rsp_size: response size in bytes
*
/**
* qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
- * @ha: HA context
+ * @vha: HA context
* @req_size: request size in bytes
* @rsp_size: response size in bytes
*
/**
* qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
- * @ct_req: CT request buffer
+ * @p: CT request buffer
* @cmd: GS command
* @rsp_size: response size in bytes
*
}
/**
- * qla2x00_fdmi_rhba() -
- * @ha: HA context
+ * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
+ * @vha: HA context
*
* Returns 0 on success.
*/
}
/**
- * qla2x00_fdmi_rpa() -
- * @ha: HA context
+ * qla2x00_fdmi_rpa() - perform RPA registration
+ * @vha: HA context
*
* Returns 0 on success.
*/
}
/**
- * qla2x00_fdmiv2_rhba() -
- * @ha: HA context
+ * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
 * qla2x00_fdmi_dhba() - perform DHBA deregistration
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
 * qla2x00_fdmiv2_rpa() - perform RPA FDMI v2 registration
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
 * qla2x00_fdmi_register() - perform FDMI registration
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* Returns 0 on success.
/**
* qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* Returns 0 on success.
/**
* qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
*
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
*/
sp->free(sp);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
sp->free(sp);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
fc_port_t *fcport;
u32 i, rc;
bool found;
- u8 fc4type = sp->gen2;
struct fab_scan_rp *rp;
unsigned long flags;
"%s %d %8phC post new sess\n",
__func__, __LINE__, rp->port_name);
qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
- rp->node_name, NULL, fc4type);
+ rp->node_name, NULL, rp->fc4type);
}
}
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
-
- if ((fc4type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled)
- qla24xx_async_gpnft(vha, FC4_TYPE_NVME);
}
-static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
+ struct srb *sp)
{
- struct srb *sp = s;
- struct scsi_qla_host *vha = sp->vha;
- struct qla_work_evt *e;
+ struct qla_hw_data *ha = vha->hw;
+ int num_fibre_dev = ha->max_fibre_devices;
struct ct_sns_req *ct_req =
(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
struct ct_sns_gpnft_rsp *ct_rsp =
(struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
struct ct_sns_gpn_ft_data *d;
struct fab_scan_rp *rp;
+ u16 cmd = be16_to_cpu(ct_req->command);
+ u8 fc4_type = sp->gen2;
int i, j, k;
+ port_id_t id;
+ u8 found;
+ u64 wwn;
+
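+	/*
+	 * GPN_FT responses fill scan-list slots in order; GNN_FT responses
+	 * backfill node names by port id. NVMe entries are merged into an
+	 * existing FCP entry by port name, or take the first free slot.
+	 */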
+ j = 0;
+ for (i = 0; i < num_fibre_dev; i++) {
+ d = &ct_rsp->entries[i];
+
+ id.b.rsvd_1 = 0;
+ id.b.domain = d->port_id[0];
+ id.b.area = d->port_id[1];
+ id.b.al_pa = d->port_id[2];
+ wwn = wwn_to_u64(d->port_name);
+
+ if (id.b24 == 0 || wwn == 0)
+ continue;
+
+ if (fc4_type == FC4_TYPE_FCP_SCSI) {
+ if (cmd == GPN_FT_CMD) {
+ rp = &vha->scan.l[j];
+ rp->id = id;
+ memcpy(rp->port_name, d->port_name, 8);
+ j++;
+ rp->fc4type = FS_FC4TYPE_FCP;
+ } else {
+ for (k = 0; k < num_fibre_dev; k++) {
+ rp = &vha->scan.l[k];
+ if (id.b24 == rp->id.b24) {
+ memcpy(rp->node_name,
+ d->port_name, 8);
+ break;
+ }
+ }
+ }
+ } else {
+ /* Search if the fibre device supports FC4_TYPE_NVME */
+ if (cmd == GPN_FT_CMD) {
+ found = 0;
+
+ for (k = 0; k < num_fibre_dev; k++) {
+ rp = &vha->scan.l[k];
+ if (!memcmp(rp->port_name,
+ d->port_name, 8)) {
+ /*
+ * Supports FC-NVMe & FCP
+ */
+ rp->fc4type |= FS_FC4TYPE_NVME;
+ found = 1;
+ break;
+ }
+ }
+
+ /* We found new FC-NVMe only port */
+ if (!found) {
+ for (k = 0; k < num_fibre_dev; k++) {
+ rp = &vha->scan.l[k];
+ if (wwn_to_u64(rp->port_name)) {
+ continue;
+ } else {
+ rp->id = id;
+ memcpy(rp->port_name,
+ d->port_name, 8);
+ rp->fc4type =
+ FS_FC4TYPE_NVME;
+ break;
+ }
+ }
+ }
+ } else {
+ for (k = 0; k < num_fibre_dev; k++) {
+ rp = &vha->scan.l[k];
+ if (id.b24 == rp->id.b24) {
+ memcpy(rp->node_name,
+ d->port_name, 8);
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_work_evt *e;
+ struct ct_sns_req *ct_req =
+ (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
u16 cmd = be16_to_cpu(ct_req->command);
+ u8 fc4_type = sp->gen2;
+ unsigned long flags;
/* gen2 field is holding the fc4type */
ql_dbg(ql_dbg_disc, vha, 0xffff,
return;
}
- if (!res) {
- port_id_t id;
- u64 wwn;
-
- j = 0;
- for (i = 0; i < vha->hw->max_fibre_devices; i++) {
- d = &ct_rsp->entries[i];
-
- id.b.rsvd_1 = 0;
- id.b.domain = d->port_id[0];
- id.b.area = d->port_id[1];
- id.b.al_pa = d->port_id[2];
- wwn = wwn_to_u64(d->port_name);
+ if (!res)
+ qla2x00_find_free_fcp_nvme_slot(vha, sp);
- if (id.b24 == 0 || wwn == 0)
- continue;
+ if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
+ cmd == GNN_FT_CMD) {
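+		/* FCP name-server scan done: queue a GPN_FT pass for NVMe, reusing this sp. */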
+ del_timer(&sp->u.iocb_cmd.timer);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
- if (cmd == GPN_FT_CMD) {
- rp = &vha->scan.l[j];
- rp->id = id;
- memcpy(rp->port_name, d->port_name, 8);
- j++;
- } else {/* GNN_FT_CMD */
- for (k = 0; k < vha->hw->max_fibre_devices;
- k++) {
- rp = &vha->scan.l[k];
- if (id.b24 == rp->id.b24) {
- memcpy(rp->node_name,
- d->port_name, 8);
- break;
- }
- }
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
+ if (!e) {
+ /*
+			 * Please ignore the kernel warning; otherwise
+			 * we would leak this memory.
+ */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s unable to alloc work element\n",
+ sp->name);
+ sp->free(sp);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return;
}
+ e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+ sp->rc = res;
+ e->u.gpnft.sp = sp;
+
+ qla2x00_post_work(vha, e);
+ return;
}
if (cmd == GPN_FT_CMD)
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
struct ct_sns_pkt *ct_sns;
+ unsigned long flags;
if (!vha->flags.online) {
+ spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
}
"%s: req %p rsp %p are not setup\n",
__func__, sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.rsp);
+ spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
WARN_ON(1);
goto done_free_sp;
}
+
+ ql_dbg(ql_dbg_disc, vha, 0xfffff,
+	    "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
+ __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
+ sp->u.iocb_cmd.u.ctarg.req_size);
+
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gnnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
}
/* Get WWPN list for certain fc4_type */
-int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type)
+int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
- srb_t *sp;
struct ct_sns_pkt *ct_sns;
u32 rspsz;
unsigned long flags;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+
if (!vha->flags.online)
return rval;
vha->scan.scan_flags |= SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
- sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
- if (!sp) {
- vha->scan.scan_flags &= ~SF_SCANNING;
+ if (fc4_type == FC4_TYPE_FCP_SCSI) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: Performing FCP Scan\n", __func__);
+
+ if (sp)
+ sp->free(sp); /* should not happen */
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp) {
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ return rval;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
+ &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
+ &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ goto done_free_sp;
+ }
+ sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
+
+ rspsz = sizeof(struct ct_sns_gpnft_rsp) +
+ ((vha->hw->max_fibre_devices - 1) *
+ sizeof(struct ct_sns_gpn_ft_data));
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
+ &vha->hw->pdev->dev, rspsz,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+			    "Failed to allocate ct_sns response.\n");
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ goto done_free_sp;
+ }
+ sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s scan list size %d\n", __func__, vha->scan.size);
+
+ memset(vha->scan.l, 0, vha->scan.size);
+ } else if (!sp) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "NVME scan did not provide SP\n");
return rval;
}
sp->gen2 = fc4_type;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
- GFP_KERNEL);
- if (!sp->u.iocb_cmd.u.ctarg.req) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate ct_sns request.\n");
- vha->scan.scan_flags &= ~SF_SCANNING;
- goto done_free_sp;
- }
-
rspsz = sizeof(struct ct_sns_gpnft_rsp) +
((vha->hw->max_fibre_devices - 1) *
sizeof(struct ct_sns_gpn_ft_data));
- sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(&vha->hw->pdev->dev,
- rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
- if (!sp->u.iocb_cmd.u.ctarg.rsp) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate ct_sns request.\n");
- vha->scan.scan_flags &= ~SF_SCANNING;
- goto done_free_sp;
- }
-
- memset(vha->scan.l, 0, vha->scan.size);
-
ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
/* GPN_FT req */
ct_req->req.gpn_ft.port_type = fc4_type;
- sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
- sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
+ spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
}
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- if (sp->type != SRB_ELS_DCMD)
- sp->free(sp);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct event_arg ea;
if (fcport) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
switch (sp->type) {
case SRB_LOGIN_CMD:
- if (!fcport)
- break;
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_PLOGI_DONE;
- ea.fcport = sp->fcport;
- ea.data[0] = lio->u.logio.data[0];
- ea.data[1] = lio->u.logio.data[1];
- ea.sp = sp;
- qla24xx_handle_plogi_done_event(fcport->vha, &ea);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
break;
case SRB_LOGOUT_CMD:
- if (!fcport)
- break;
- qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
- break;
case SRB_CT_PTHRU_CMD:
case SRB_MB_IOCB:
case SRB_NACK_PLOGI:
sp->free(sp);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
qla2x00_async_logout_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *lio = &sp->u.iocb_cmd;
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
- qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
- lio->u.logio.data);
+ sp->fcport->login_gen++;
+ qlt_logo_completion_handler(sp->fcport, res);
sp->free(sp);
}
done_free_sp:
sp->free(sp);
done:
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
return rval;
}
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
/* Don't re-login in target mode */
if (!fcport->tgt_session)
qla2x00_mark_device_lost(vha, fcport, 1, 0);
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
+ sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
if (!test_bit(UNLOADING, &vha->dpc_flags))
qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
done_free_sp:
sp->free(sp);
done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
"Async done-%s res %x %8phC\n",
sp->name, res, sp->fcport->port_name);
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+
memset(&ea, 0, sizeof(ea));
ea.event = FCME_ADISC_DONE;
ea.rc = res;
done_free_sp:
sp->free(sp);
done:
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_post_async_adisc_work(vha, fcport, data);
return rval;
}
(loop_id & 0x7fff));
}
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- vha->gnl.sent = 0;
+ spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
INIT_LIST_HEAD(&h);
fcport = tf = NULL;
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
+ spin_lock(&vha->hw->tgt.sess_lock);
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ spin_unlock(&vha->hw->tgt.sess_lock);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
+ spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* create new fcport if fw has knowledge of new sessions */
for (i = 0; i < n; i++) {
port_id_t id;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
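+	/* A non-empty gnl_entry means a get-name-list is already in flight for this fcport. */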
+ if (!list_empty(&fcport->gnl_entry)) {
+ spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
+ rval = QLA_SUCCESS;
+ goto done;
+ }
+
+ spin_lock(&vha->hw->tgt.sess_lock);
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
+ spin_unlock(&vha->hw->tgt.sess_lock);
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
- if (vha->gnl.sent) {
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- return QLA_SUCCESS;
- }
- vha->gnl.sent = 1;
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
return rval;
if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
- fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
fcport->fw_login_state == DSC_LS_PRLI_PEND)
return rval;
fc_port_t *fcport = ea->fcport;
struct port_database_24xx *pd;
struct srb *sp = ea->sp;
+ uint8_t ls;
pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
if (fcport->disc_state == DSC_DELETE_PEND)
return;
- switch (pd->current_login_state) {
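+	/* The port database packs the NVMe login state in the upper nibble, FCP in the lower. */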
+ if (fcport->fc4f_nvme)
+ ls = pd->current_login_state >> 4;
+ else
+ ls = pd->current_login_state & 0xf;
+
+ switch (ls) {
case PDS_PRLI_COMPLETE:
__qla24xx_parse_gpdb(vha, fcport, pd);
break;
if (fcport->scan_state != QLA_FCPORT_FOUND)
return 0;
- if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+ if ((fcport->loop_id != FC_NO_LOOP_ID) &&
+ ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
return 0;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
qla2x00_post_async_adisc_work(vha, fcport, data);
break;
+ case DSC_LOGIN_PEND:
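+		/* Firmware already finished PLOGI; move straight to PRLI. */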
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ qla24xx_post_prli_work(vha, fcport);
+ break;
+
default:
break;
}
srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
+ del_timer(&sp->u.iocb_cmd.timer);
complete(&abt->u.abt.comp);
}
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
default:
+ if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
+		    (ea->iop[1] == 0x50000)) { /* reason 5=busy, expl 0x0 */
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
+ break;
+ }
+
if (ea->fcport->n2n_flag) {
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post fc4 prli\n",
set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- ea->fcport->loop_id = FC_NO_LOOP_ID;
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
qla2x00_mark_device_lost(vha, fcport, 1, 0);
qlt_logo_completion_handler(fcport, data[0]);
fcport->login_gen++;
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return;
}
qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (data[0] == MBS_COMMAND_COMPLETE) {
qla2x00_update_fcport(vha, fcport);
}
/* Retry login. */
- fcport->flags &= ~FCF_ASYNC_SENT;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
else
/**
* qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_isp_firmware() - Choose firmware image.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_reset_chip() - Reset ISP chip.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
 * qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla24xx_reset_chip() - Reset ISP24xx chip.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_chip_diag() - Test chip for proper operation.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/* Assume a failed state */
rval = QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_init, vha, 0x007b,
- "Testing device at %lx.\n", (u_long)®->flash_address);
+ ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
+ ®->flash_address);
spin_lock_irqsave(&ha->hardware_lock, flags);
/**
* qla24xx_chip_diag() - Test ISP24xx for proper operation.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_init_response_q_entries() - Initializes response queue entries.
- * @ha: HA context
+ * @rsp: response queue
*
* Beginning of request ring has initialization control block already built
* by nvram config routine.
/**
* qla2x00_update_fw_options() - Read and process firmware options.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_init_rings() - Initializes firmware.
- * @ha: HA context
+ * @vha: HA context
*
* Beginning of request ring has initialization control block already built
* by nvram config routine.
/**
* qla2x00_fw_ready() - Waits for firmware ready.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
/**
* qla2x00_alloc_fcport() - Allocate a generic fcport.
- * @ha: HA context
+ * @vha: HA context
* @flags: allocation flags
*
* Returns a pointer to the allocated fcport, or NULL, if none available.
fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2005,
- "iIDMA adjusted to %s GB/s on %8phN.\n",
+ "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
qla2x00_get_link_speed_str(ha, fcport->fp_speed),
- fcport->port_name);
+ fcport->fp_speed, fcport->port_name);
}
}
fcport->deleted = 0;
fcport->logout_on_delete = 1;
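+	/* Mark the port online and apply iIDMA before the NVMe path returns early below. */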
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+ qla2x00_iidma_fcport(vha, fcport);
+
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
return;
}
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
reg_port:
qlt_do_generation_tick(vha, &discovery_gen);
if (USE_ASYNC_SCAN(ha)) {
- rval = QLA_SUCCESS;
- rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
+ rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
+ NULL);
if (rval)
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
} else {
break;
}
+ if (fcport->fc4f_nvme) {
+ if (fcport->disc_state == DSC_DELETE_PEND) {
+ fcport->disc_state = DSC_GNL;
+ vha->fcport_count--;
+ fcport->login_succ = 0;
+ }
+ }
+
if (found) {
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
continue;
qpair->vp_idx = vp_idx;
qpair->fw_started = ha->flags.fw_started;
INIT_LIST_HEAD(&qpair->hints_list);
- INIT_LIST_HEAD(&qpair->nvme_done_list);
qpair->chip_reset = ha->base_qpair->chip_reset;
qpair->enable_class_2 = ha->base_qpair->enable_class_2;
qpair->enable_explicit_conf =
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
- * @cmd: SCSI command
+ * @sp: SCSI command
*
* Returns the proper CF_* direction based on CDB.
*/
/**
* qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
- * @ha: HA context
+ * @vha: HA context
*
* Returns a pointer to the Continuation Type 0 IOCB packet.
*/
/**
* qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
- * @ha: HA context
+ * @vha: HA context
+ * @req: request queue
*
* Returns a pointer to the continuation type 1 IOCB packet.
*/
/**
* qla2x00_start_iocbs() - Execute the IOCB command
+ * @vha: HA context
+ * @req: request queue
*/
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
/**
* qla2x00_marker() - Send a marker IOCB to the firmware.
- * @ha: HA context
+ * @vha: HA context
+ * @req: request queue
+ * @rsp: response queue
* @loop_id: loop ID
* @lun: LUN
* @type: marker modifier
* @sp: SRB command to process
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
+ * @tot_prot_dsds: Total number of protection segments to transfer
+ * @fw_prot_opts: Firmware protection options
*/
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint32_t dif_bytes;
uint8_t bundling = 1;
uint16_t blk_size;
- uint8_t *clr_ptr;
struct crc_context *crc_ctx_pkt = NULL;
struct qla_hw_data *ha;
uint8_t additional_fcpcdb_len;
/* Allocate CRC context from global pool */
crc_ctx_pkt = sp->u.scmd.ctx =
- dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
+ dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
if (!crc_ctx_pkt)
goto crc_queuing_error;
- /* Zero out CTX area. */
- clr_ptr = (uint8_t *)crc_ctx_pkt;
- memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
-
crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
sp->flags |= SRB_CRC_CTX_DMA_VALID;
}
memset(ctx, 0, sizeof(struct ct6_dsd));
- ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+ ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
GFP_ATOMIC, &ctx->fcp_cmnd_dma);
if (!ctx->fcp_cmnd) {
ql_log(ql_log_fatal, vha, 0x3011,
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
/* build FCP_CMND IU */
- memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle =
- cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
- aio->u.abt.cmd_hndl));
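+	/* handle identifies this abort IOCB; handle_to_abort names the command being aborted. */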
+ abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->handle_to_abort =
- cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+ cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ aio->u.abt.cmd_hndl));
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
/**
* qla2x00_mbx_completion() - Process mailbox command completions.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
* @mb0: Mailbox0 register
*/
static void
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
/* Read all mbox registers? */
- mboxes = (1 << ha->mbx_count) - 1;
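+	/* Use a 64-bit shift so mbx_count == 32 stays well defined. */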
+ WARN_ON_ONCE(ha->mbx_count > 32);
+ mboxes = (1ULL << ha->mbx_count) - 1;
if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
else
/**
 * qla2x00_async_event() - Process asynchronous events.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
* @mb: Mailbox registers (0 - 3)
*/
void
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
ha->flags.lip_ae = 1;
- ha->flags.n2n_ae = 0;
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
case MBA_LOOP_DOWN: /* Loop Down Event */
SAVE_TOPO(ha);
- ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
/* case MBA_DCBX_COMPLETE: */
case MBA_POINT_TO_POINT: /* Point-to-Point */
ha->flags.lip_ae = 0;
- ha->flags.n2n_ae = 1;
if (IS_QLA2100(ha))
break;
/**
* qla2x00_process_completed_request() - Process a Fast Post response.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @req: request queue
* @index: SRB index
*/
void
sp->done(sp, 0);
}
-static void
-qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ void *tsk, srb_t *sp)
{
- const char func[] = "NVME-IOCB";
fc_port_t *fcport;
- srb_t *sp;
struct srb_iocb *iocb;
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
uint16_t state_flags;
struct nvmefc_fcp_req *fd;
uint16_t ret = 0;
- struct srb_iocb *nvme;
-
- sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
- if (!sp)
- return;
iocb = &sp->u.iocb_cmd;
fcport = sp->fcport;
iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
state_flags = le16_to_cpu(sts->state_flags);
fd = iocb->u.nvme.desc;
- nvme = &sp->u.iocb_cmd;
- if (unlikely(nvme->u.nvme.aen_op))
+ if (unlikely(iocb->u.nvme.aen_op))
atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
/*
fd->transferred_length = fd->payload_length -
le32_to_cpu(sts->residual_len);
- /*
- * If transport error then Failure (HBA rejects request)
- * otherwise transport will handle.
- */
- if (sts->entry_status) {
- ql_log(ql_log_warn, fcport->vha, 0x5038,
- "NVME-%s error - hdl=%x entry-status(%x).\n",
- sp->name, sp->handle, sts->entry_status);
+ switch (le16_to_cpu(sts->comp_status)) {
+ case CS_COMPLETE:
+ ret = QLA_SUCCESS;
+ break;
+ case CS_ABORTED:
+ case CS_RESET:
+ case CS_PORT_UNAVAILABLE:
+ case CS_PORT_LOGGED_OUT:
+ case CS_PORT_BUSY:
+ ql_log(ql_log_warn, fcport->vha, 0x5060,
+ "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
+ sp->name, sp->handle, sts->comp_status,
+ le32_to_cpu(sts->residual_len), sts->ox_id);
+ fd->transferred_length = 0;
+ iocb->u.nvme.rsp_pyld_len = 0;
+ ret = QLA_ABORTED;
+ break;
+ default:
+ ql_log(ql_log_warn, fcport->vha, 0x5060,
+ "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
+ sp->name, sp->handle, sts->comp_status,
+ le32_to_cpu(sts->residual_len), sts->ox_id);
ret = QLA_FUNCTION_FAILED;
- } else {
- switch (le16_to_cpu(sts->comp_status)) {
- case CS_COMPLETE:
- ret = 0;
- break;
-
- case CS_ABORTED:
- case CS_RESET:
- case CS_PORT_UNAVAILABLE:
- case CS_PORT_LOGGED_OUT:
- case CS_PORT_BUSY:
- ql_log(ql_log_warn, fcport->vha, 0x5060,
- "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
- sp->name, sp->handle, sts->comp_status,
- le32_to_cpu(sts->residual_len), sts->ox_id);
- fd->transferred_length = fd->payload_length;
- ret = QLA_ABORTED;
- break;
-
- default:
- ql_log(ql_log_warn, fcport->vha, 0x5060,
- "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
- sp->name, sp->handle, sts->comp_status,
- le32_to_cpu(sts->residual_len), sts->ox_id);
- ret = QLA_FUNCTION_FAILED;
- break;
- }
+ break;
}
sp->done(sp, ret);
}
/**
* qla2x00_process_response_queue() - Process response queue entries.
- * @ha: SCSI driver HA context
+ * @rsp: response queue
*/
void
qla2x00_process_response_queue(struct rsp_que *rsp)
/**
* qla2x00_status_entry() - Process a Status IOCB entry.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
*/
static void
/* NVME completion. */
if (sp->type == SRB_NVME_CMD) {
- qla24xx_nvme_iocb_entry(vha, req, pkt);
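+		/* Clear the handle here; the NVMe path returns before the generic completion below. */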
+ req->outstanding_cmds[handle] = NULL;
+ qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
return;
}
/**
* qla2x00_status_cont_entry() - Process a Status Continuations entry.
- * @ha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
*
* Extended sense data.
/**
* qla2x00_error_entry() - Process an error entry.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
* return : 1=allow further error analysis. 0=no additional error analysis.
*/
/**
* qla24xx_mbx_completion() - Process mailbox command completions.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
* @mb0: Mailbox0 register
*/
static void
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Read all mbox registers? */
- mboxes = (1 << ha->mbx_count) - 1;
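+	/* Use a 64-bit shift so mbx_count == 32 stays well defined. */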
+ WARN_ON_ONCE(ha->mbx_count > 32);
+ mboxes = (1ULL << ha->mbx_count) - 1;
if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
else
/**
* qla24xx_process_response_queue() - Process response queue entries.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
*/
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct rsp_que *rsp)
}
pr_warn(" cmd=%x ****\n", command);
}
- ql_dbg(ql_dbg_mbx, vha, 0x1198,
- "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
- RD_REG_DWORD(®->isp24.host_status),
- RD_REG_DWORD(®->isp24.ictrl),
- RD_REG_DWORD(®->isp24.istatus));
+ if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1198,
+ "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
+ RD_REG_DWORD(®->isp24.host_status),
+ RD_REG_DWORD(®->isp24.ictrl),
+ RD_REG_DWORD(®->isp24.istatus));
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1206,
+ "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
+ RD_REG_WORD(®->isp.ctrl_status),
+ RD_REG_WORD(®->isp.ictrl),
+ RD_REG_WORD(®->isp.istatus));
+ }
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
* FW supports nvme and driver load parameter requested nvme.
* BIT 26 of fw_attributes indicates NVMe support.
*/
- if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
+ if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
vha->flags.nvme_enabled = 1;
-
+ ql_log(ql_log_info, vha, 0xd302,
+ "%s: FC-NVMe is Enabled (0x%x)\n",
+ __func__, ha->fw_attributes_h);
+ }
}
if (IS_QLA27XX(ha)) {
/**
 * qla2x00_set_serdes_params() - set serdes transmitter parameters
- * @ha: HA context
+ * @vha: HA context
+ * @sw_em_1g: serial link swing/emphasis word for 1 Gb/s
+ * @sw_em_2g: serial link swing/emphasis word for 2 Gb/s
+ * @sw_em_4g: serial link swing/emphasis word for 4 Gb/s
*
 * Returns 0 on success.
*/
id.b.area = rptid_entry->port_id[1];
id.b.al_pa = rptid_entry->port_id[0];
id.b.rsvd_1 = 0;
+ ha->flags.n2n_ae = 0;
if (rptid_entry->format == 0) {
/* loop */
set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ ha->flags.n2n_ae = 1;
return;
}
vha->d_id.b.area = rptid_entry->port_id[1];
vha->d_id.b.al_pa = rptid_entry->port_id[0];
+ ha->flags.n2n_ae = 1;
spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, SET_AL_PA);
spin_unlock_irqrestore(&ha->vport_slock, flags);
ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
- INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
qla_cpu_update(rsp->qpair, raw_smp_processor_id());
ha->req_q_map[0] = req;
set_bit(0, ha->rsp_qid_map);
set_bit(0, ha->req_qid_map);
- return 1;
+ return 0;
fail_qpair_map:
kfree(ha->base_qpair);
static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
+ if (!ha->req_q_map)
+ return;
+
if (IS_QLAFX00(ha)) {
if (req && req->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
- if (req)
+ if (req) {
kfree(req->outstanding_cmds);
-
- kfree(req);
+ kfree(req);
+ }
}
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
+ if (!ha->rsp_q_map)
+ return;
+
if (IS_QLAFX00(ha)) {
- if (rsp && rsp->ring)
+ if (rsp && rsp->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
(rsp->length_fx00 + 1) * sizeof(request_t),
rsp->ring_fx00, rsp->dma_fx00);
(rsp->length + 1) * sizeof(response_t),
rsp->ring, rsp->dma);
}
- kfree(rsp);
+ if (rsp)
+ kfree(rsp);
}
static void qla2x00_free_queues(struct qla_hw_data *ha)
struct qla_tgt_cmd *cmd;
uint8_t trace = 0;
+ if (!ha->req_q_map)
+ return;
spin_lock_irqsave(qp->qp_lock_ptr, flags);
req = qp->req;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp_get(sp);
spin_unlock_irqrestore(qp->qp_lock_ptr,
flags);
- qla_nvme_abort(ha, sp);
+ qla_nvme_abort(ha, sp, res);
spin_lock_irqsave(qp->qp_lock_ptr,
flags);
} else if (GET_CMD_SP(sp) &&
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
- goto probe_hw_failed;
+ goto probe_failed;
/* Alloc arrays of request and response ring ptrs */
- if (!qla2x00_alloc_queues(ha, req, rsp)) {
+ if (qla2x00_alloc_queues(ha, req, rsp)) {
ql_log(ql_log_fatal, base_vha, 0x003d,
"Failed to allocate memory for queue pointers..."
"aborting.\n");
- goto probe_init_failed;
+ goto probe_failed;
}
if (ha->mqenable && shost_use_blk_mq(host)) {
return 0;
- probe_init_failed:
- qla2x00_free_req_que(ha, req);
- ha->req_q_map[0] = NULL;
- clear_bit(0, ha->req_qid_map);
- qla2x00_free_rsp_que(ha, rsp);
- ha->rsp_q_map[0] = NULL;
- clear_bit(0, ha->rsp_qid_map);
- ha->max_req_queues = ha->max_rsp_queues = 0;
-
probe_failed:
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
}
qla2x00_wait_for_hba_ready(base_vha);
+ qla2x00_wait_for_sess_deletion(base_vha);
+
/*
* if UNLOAD flag is already set, then continue unload,
* where it was set first.
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
- vfree(ha->optrom_buffer);
- kfree(ha->nvram);
- kfree(ha->npiv_info);
- kfree(ha->swl);
- kfree(ha->loop_id_map);
+
+ if (ha->optrom_buffer)
+ vfree(ha->optrom_buffer);
+ if (ha->nvram)
+ kfree(ha->nvram);
+ if (ha->npiv_info)
+ kfree(ha->npiv_info);
+ if (ha->swl)
+ kfree(ha->swl);
+ if (ha->loop_id_map)
+ kfree(ha->loop_id_map);
ha->srb_mempool = NULL;
ha->ctx_mempool = NULL;
ha->ex_init_cb_dma = 0;
ha->async_pd = NULL;
ha->async_pd_dma = 0;
+ ha->loop_id_map = NULL;
+ ha->npiv_info = NULL;
+ ha->optrom_buffer = NULL;
+ ha->swl = NULL;
+ ha->nvram = NULL;
+ ha->mctp_dump = NULL;
+ ha->dcbx_tlv = NULL;
+ ha->xgmac_data = NULL;
+ ha->sfp_data = NULL;
ha->s_dma_pool = NULL;
ha->dl_dma_pool = NULL;
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
+ spin_lock_init(&vha->gnl.fcports_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) {
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- } else if (e->u.new_sess.fc4_type == FC4_TYPE_NVME) {
+
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
+
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
} else {
}
qlt_plogi_ack_unref(vha, pla);
} else {
+ fc_port_t *dfcp = NULL;
+
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
tfcp = qla2x00_find_fcport_by_nportid(vha,
&e->u.new_sess.id, 1);
default:
fcport->login_pause = 1;
tfcp->conflict = fcport;
- qlt_schedule_sess_for_deletion(tfcp);
+ dfcp = tfcp;
break;
}
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ if (dfcp)
+ qlt_schedule_sess_for_deletion(tfcp);
wwn = wwn_to_u64(fcport->node_name);
e->u.logio.data);
break;
case QLA_EVT_GPNFT:
- qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type);
+ qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
+ e->u.gpnft.sp);
break;
case QLA_EVT_GPNFT_DONE:
qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
logo->cmd_count, res);
}
-static void qlt_free_session_done(struct work_struct *work)
+void qlt_free_session_done(struct work_struct *work)
{
struct fc_port *sess = container_of(work, struct fc_port,
free_work);
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
- if (sess->nvme_flag & NVME_FLAG_REGISTERED)
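+	/* Run the NVMe remote-port delete first; NVME_FLAG_DELETING prevents rescheduling it. */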
+ if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
+ !(sess->nvme_flag & NVME_FLAG_DELETING)) {
+ sess->nvme_flag |= NVME_FLAG_DELETING;
schedule_work(&sess->nvme_del_work);
-
- INIT_WORK(&sess->free_work, qlt_free_session_done);
- schedule_work(&sess->free_work);
+ } else {
+ INIT_WORK(&sess->free_work, qlt_free_session_done);
+ schedule_work(&sess->free_work);
+ }
}
EXPORT_SYMBOL(qlt_unreg_sess);
}
}
- /* ha->tgt.sess_lock supposed to be held on entry */
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
+ struct qla_hw_data *ha = sess->vha->hw;
unsigned long flags;
if (sess->disc_state == DSC_DELETE_PEND)
return;
}
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->deleted == QLA_SESS_DELETED)
sess->logout_on_delete = 0;
- spin_lock_irqsave(&sess->vha->work_lock, flags);
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
}
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
- spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
sess->disc_state = DSC_DELETE_PEND;
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- /* use cancel to push work element through before re-queue */
- cancel_work_sync(&sess->del_work);
INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
- queue_work(sess->vha->hw->wq, &sess->del_work);
+ WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
- /* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
struct fc_port *sess;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
sess->local = 1;
- qlt_schedule_sess_for_deletion(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qlt_schedule_sess_for_deletion(sess);
}
static inline int test_tgt_sess_count(struct qla_tgt *tgt)
* Lock is needed, because we still can get an incoming packet.
*/
mutex_lock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt->tgt_stop = 1;
qlt_clear_tgt_db(tgt);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
mutex_unlock(&qla_tgt_mutex);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
if (!sess) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
- "qla_target(%d): task abort for non-existant session\n",
+ "qla_target(%d): task abort for non-existent session\n",
vha->vp_idx);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
uint32_t data_bytes;
uint32_t dif_bytes;
uint8_t bundling = 1;
- uint8_t *clr_ptr;
struct crc_context *crc_ctx_pkt = NULL;
struct qla_hw_data *ha;
struct ctio_crc2_to_fw *pkt;
/* Allocate CRC context from global pool */
crc_ctx_pkt = cmd->ctx =
- dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
+ dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
if (!crc_ctx_pkt)
goto crc_queuing_error;
- /* Zero out CTX area. */
- clr_ptr = (uint8_t *)crc_ctx_pkt;
- memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
-
crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
sess);
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
- spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
- flags);
break;
}
/**
* qla_tgt_lport_register - register lport with external module
*
- * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
- * @wwpn: Passwd FC target WWPN
- * @callback: lport initialization callback for tcm_qla2xxx code
* @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ * @phys_wwpn: physical port WWPN
+ * @npiv_wwpn: NPIV WWPN
+ * @npiv_wwnn: NPIV WWNN
+ * @callback: lport initialization callback for tcm_qla2xxx code
*/
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
u64 npiv_wwpn, u64 npiv_wwnn,
static struct scsi_transport_template *qla4xxx_scsi_transport;
+ static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
+ {
+ u32 reg_val = 0;
+ int rval = QLA_SUCCESS;
+
+ if (is_qla8022(ha))
+ reg_val = readl(&ha->qla4_82xx_reg->host_status);
+ else if (is_qla8032(ha) || is_qla8042(ha))
+ reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+ else
+ reg_val = readw(&ha->reg->ctrl_status);
+
+ if (reg_val == QL4_ISP_REG_DISCONNECT)
+ rval = QLA_ERROR;
+
+ return rval;
+ }
+
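qla4xxx_isp_check_reg() leans on the PCI rule that reads from a surprise-removed device return all ones, so comparing any live register against the QL4_ISP_REG_DISCONNECT sentinel distinguishes a dead bus from a real value. A runnable sketch of just that comparison, assuming the all-ones sentinel (the driver's actual constant lives in its headers):

#include <stdint.h>
#include <stdio.h>

#define REG_DISCONNECT 0xffffffffU	/* mirrors QL4_ISP_REG_DISCONNECT */

static int isp_reachable(uint32_t reg_val)
{
	return reg_val != REG_DISCONNECT;
}

int main(void)
{
	printf("%d\n", isp_reachable(0x00001234));	/* 1: adapter alive */
	printf("%d\n", isp_reachable(0xffffffff));	/* 0: disconnected */
	return 0;
}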
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
uint32_t iface_type, uint32_t payload_size,
uint32_t pid, struct sockaddr *dst_addr)
uint32_t chap_size;
int ret = 0;
- chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
+ chap_table = dma_pool_zalloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
if (chap_table == NULL)
return -ENOMEM;
- memset(chap_table, 0, sizeof(struct ql4_chap_table));
-
if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
struct srb *srb = NULL;
int ret = SUCCESS;
int wait = 0;
+ int rval;
ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
ha->host_no, id, lun, cmd, cmd->cmnd[0]);
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
if (!srb) {
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
int ret = FAILED, stat;
+ int rval;
if (!ddb_entry)
return ret;
cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
/* FIXME: wait for hba to go online */
stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
if (stat != QLA_SUCCESS) {
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
int stat, ret;
+ int rval;
if (!ddb_entry)
return FAILED;
ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
stat = qla4xxx_reset_target(ha, ddb_entry);
if (stat != QLA_SUCCESS) {
starget_printk(KERN_INFO, scsi_target(cmd->device),
{
int return_status = FAILED;
struct scsi_qla_host *ha;
+ int rval;
ha = to_qla_host(cmd->device->host);
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
qla4_83xx_set_idc_dontreset(ha);
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
+ *
+ * Note: this function must be called only for a command that has timed out.
+ * Because the block layer marks a request as complete before it calls
+ * scsi_times_out(), a .scsi_done() call from the LLD for a command that has
+ * timed out does not have any effect. Hence it is safe to call
+ * scsi_finish_command() from this function.
*/
void
scmd_eh_abort_handler(struct work_struct *work)
static void scsi_eh_inc_host_failed(struct rcu_head *head)
{
- struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+ struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
+ struct Scsi_Host *shost = scmd->device->host;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
* Ensure that all tasks observe the host state change before the
* host_failed change.
*/
- call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
+ call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
}
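The fix embeds the rcu_head in each scsi_cmnd instead of the single Scsi_Host, so two commands failing concurrently no longer race on one RCU callback slot; the callback then recovers its command via container_of(). A runnable userspace sketch of that recovery (fake_cmnd and fake_rcu_head are illustrative stand-ins, not kernel types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_rcu_head { void *next; };

struct fake_cmnd {
	int tag;
	struct fake_rcu_head rcu;	/* embedded, like scsi_cmnd::rcu */
};

static void callback(struct fake_rcu_head *head)
{
	struct fake_cmnd *cmd = container_of(head, struct fake_cmnd, rcu);

	printf("recovered command with tag %d\n", cmd->tag);
}

int main(void)
{
	struct fake_cmnd cmd = { .tag = 42 };

	callback(&cmd.rcu);	/* what call_rcu() would invoke later */
	return 0;
}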
/**
}
return FAILED;
- maybe_retry:
+maybe_retry:
/* we requeue for retry because the error was retryable, and
* the request was not marked fast fail. Note that above,
if (shost->unchecked_isa_dma) {
scsi_sense_isadma_cache =
kmem_cache_create("scsi_sense_cache(DMA)",
- SCSI_SENSE_BUFFERSIZE, 0,
- SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
+ SCSI_SENSE_BUFFERSIZE, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
if (!scsi_sense_isadma_cache)
ret = -ENOMEM;
} else {
scsi_sense_cache =
- kmem_cache_create("scsi_sense_cache",
- SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
+ kmem_cache_create_usercopy("scsi_sense_cache",
+ SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
+ 0, SCSI_SENSE_BUFFERSIZE, NULL);
if (!scsi_sense_cache)
ret = -ENOMEM;
}
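kmem_cache_create_usercopy() tells hardened usercopy which byte range of each object may legitimately cross the user/kernel boundary; the hunk whitelists the entire sense buffer (offset 0, SCSI_SENSE_BUFFERSIZE bytes). A kernel-style sketch of the more common partial whitelist, assuming illustrative struct and cache names; this is a sketch under those assumptions, not a buildable module:

struct example {			/* illustrative, not a kernel type */
	spinlock_t lock;		/* must never reach user space */
	char payload[96];		/* the only user-copyable region */
};

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	example_cache = kmem_cache_create_usercopy("example_cache",
			sizeof(struct example), 0, SLAB_HWCACHE_ALIGN,
			offsetof(struct example, payload),	  /* useroffset */
			sizeof(((struct example *)0)->payload),	  /* usersize */
			NULL);
	return example_cache ? 0 : -ENOMEM;
}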
*/
cmd->result = 0;
if (q->mq_ops) {
- scsi_mq_requeue_cmd(cmd);
+ /*
+ * Before a SCSI command is dispatched,
+ * get_device(&sdev->sdev_gendev) is called and the host,
+ * target and device busy counters are increased. Since
+ * requeuing a request causes these actions to be repeated and
+ * since scsi_device_unbusy() has already been called,
+ * put_device(&device->sdev_gendev) must still be called. Call
+ * put_device() after blk_mq_requeue_request() to avoid that
+ * removal of the SCSI device can start before requeueing has
+ * happened.
+ */
+ blk_mq_requeue_request(cmd->request, true);
+ put_device(&device->sdev_gendev);
return;
}
spin_lock_irqsave(q->queue_lock, flags);
if (!blk_rq_is_scsi(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
cmd->flags &= ~SCMD_INITIALIZED;
+ destroy_rcu_head(&cmd->rcu);
}
if (req->mq_ctx) {
int result)
{
switch (host_byte(result)) {
+ case DID_OK:
+ return BLK_STS_OK;
case DID_TRANSPORT_FAILFAST:
return BLK_STS_TRANSPORT;
case DID_TARGET_FAILURE:
/* for passthrough error may be set */
error = BLK_STS_OK;
}
+ /*
+ * Another corner case: the SCSI status byte is non-zero but 'good'.
+ * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
+ * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
+ * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
+ * intermediate statuses (both obsolete in SAM-4) as good.
+ */
+ if (status_byte(result) && scsi_status_is_good(result)) {
+ result = 0;
+ error = BLK_STS_OK;
+ }
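The added check turns a non-zero SCSI status that scsi_status_is_good() accepts into plain success. A runnable userspace sketch with the two statuses relevant here re-created locally (the kernel helper also accepts a few intermediate statuses, omitted for brevity):

#include <stdio.h>

#define SAM_STAT_GOOD		0x00
#define SAM_STAT_CONDITION_MET	0x04

#define status_byte(result) (((result) >> 1) & 0x7f)

static int scsi_status_is_good(int status)
{
	status &= 0xfe;		/* ignore the SCSI-2 reserved bit */
	return status == SAM_STAT_GOOD || status == SAM_STAT_CONDITION_MET;
}

int main(void)
{
	int result = SAM_STAT_CONDITION_MET;	/* PRE-FETCH: LBs fit in cache */

	if (status_byte(result) && scsi_status_is_good(result))
		printf("non-zero but good: treat as BLK_STS_OK\n");
	return 0;
}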
/*
* special case: failed zero length commands always need to
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
scsi_req_init(&cmd->req);
+ init_rcu_head(&cmd->rcu);
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
}
out_put_device:
put_device(&sdev->sdev_gendev);
out:
+ if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
+ blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
return false;
}
case BLK_STS_OK:
break;
case BLK_STS_RESOURCE:
- if (atomic_read(&sdev->device_busy) == 0 &&
- !scsi_device_blocked(sdev))
- blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
+ if (atomic_read(&sdev->device_busy) ||
+ scsi_device_blocked(sdev))
+ ret = BLK_STS_DEV_RESOURCE;
break;
default:
/*
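The BLK_STS_RESOURCE hunk above encodes the new blk-mq contract: return BLK_STS_DEV_RESOURCE only when an event the driver controls (an in-flight completion, an unblock) is guaranteed to rerun the queue, and keep BLK_STS_RESOURCE when blk-mq must schedule its own rerun. A hedged kernel-style sketch of that contract in a hypothetical queue_rq handler; my_dev, my_credits and my_submit are illustrative, not a real driver:

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->queue->queuedata;

	if (atomic_dec_if_positive(&dev->my_credits) < 0)
		/* Commands are in flight; a completion reruns the queue. */
		return BLK_STS_DEV_RESOURCE;

	blk_mq_start_request(bd->rq);
	my_submit(dev, bd->rq);		/* illustrative hardware submit */
	return BLK_STS_OK;
}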
q->limits.cluster = 0;
/*
- * set a reasonable default alignment on word boundaries: the
- * host and device may alter it using
- * blk_queue_update_dma_alignment() later.
+ * Set a reasonable default alignment: The larger of 32-byte (dword),
+ * which is a common minimum for HBAs, and the minimum DMA alignment,
+ * which is set by the platform.
+ *
+ * Devices that require a bigger alignment can increase it later.
*/
- blk_queue_dma_alignment(q, 0x03);
+ blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);
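blk_queue_dma_alignment() takes a mask, i.e. alignment minus one, so the new expression never drops below the old dword mask 0x03 and otherwise follows the platform cache alignment. A runnable sketch of the arithmetic (mask_for() is an illustrative helper and the inputs are example values):

#include <stdio.h>

static unsigned int mask_for(unsigned int cache_align)
{
	unsigned int align = cache_align > 4 ? cache_align : 4;

	return align - 1;	/* what blk_queue_dma_alignment() stores */
}

int main(void)
{
	printf("%#x\n", mask_for(1));	/* 0x3: the old dword minimum */
	printf("%#x\n", mask_for(64));	/* 0x3f: 64-byte cache lines */
	return 0;
}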
/* try to eat the UNIT_ATTENTION if there are enough retries */
do {
result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
- timeout, retries, NULL);
+ timeout, 1, NULL);
if (sdev->removable && scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION)
sdev->changed = 1;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);
u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
- int ret;
if (!(rq->cmd_flags & REQ_NOUNMAP)) {
switch (sdkp->zeroing_mode) {
case SD_ZERO_WS16_UNMAP:
- ret = sd_setup_write_same16_cmnd(cmd, true);
- goto out;
+ return sd_setup_write_same16_cmnd(cmd, true);
case SD_ZERO_WS10_UNMAP:
- ret = sd_setup_write_same10_cmnd(cmd, true);
- goto out;
+ return sd_setup_write_same10_cmnd(cmd, true);
}
}
return BLKPREP_INVALID;
if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
- ret = sd_setup_write_same16_cmnd(cmd, false);
- else
- ret = sd_setup_write_same10_cmnd(cmd, false);
-
-out:
- if (sd_is_zoned(sdkp) && ret == BLKPREP_OK)
- return sd_zbc_write_lock_zone(cmd);
+ return sd_setup_write_same16_cmnd(cmd, false);
- return ret;
+ return sd_setup_write_same10_cmnd(cmd, false);
}
static void sd_config_write_same(struct scsi_disk *sdkp)
BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
- if (sd_is_zoned(sdkp)) {
- ret = sd_zbc_write_lock_zone(cmd);
- if (ret != BLKPREP_OK)
- return ret;
- }
-
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
ret = scsi_init_io(cmd);
rq->__data_len = nr_bytes;
- if (sd_is_zoned(sdkp) && ret != BLKPREP_OK)
- sd_zbc_write_unlock_zone(cmd);
-
return ret;
}
sector_t threshold;
unsigned int this_count = blk_rq_sectors(rq);
unsigned int dif, dix;
- bool zoned_write = sd_is_zoned(sdkp) && rq_data_dir(rq) == WRITE;
int ret;
unsigned char protect;
- if (zoned_write) {
- ret = sd_zbc_write_lock_zone(SCpnt);
- if (ret != BLKPREP_OK)
- return ret;
- }
-
ret = scsi_init_io(SCpnt);
if (ret != BLKPREP_OK)
- goto out;
+ return ret;
WARN_ON_ONCE(SCpnt != rq->special);
/* from here on until we're complete, any goto out
*/
ret = BLKPREP_OK;
out:
- if (zoned_write && ret != BLKPREP_OK)
- sd_zbc_write_unlock_zone(SCpnt);
-
return ret;
}
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
-
- if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
- sd_zbc_write_unlock_zone(SCpnt);
+ u8 *cmnd;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
__free_page(rq->special_vec.bv_page);
if (SCpnt->cmnd != scsi_req(rq)->cmd) {
- mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ cmnd = SCpnt->cmnd;
SCpnt->cmnd = NULL;
SCpnt->cmd_len = 0;
+ mempool_free(cmnd, sd_cdb_pool);
}
}
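The reorder in sd_uninit_command() closes the window in which SCpnt->cmnd still pointed at memory already returned to the mempool, which a concurrent reader (a request dump, for instance) could follow. A runnable userspace sketch of the detach-then-free ordering, with illustrative types:

#include <stdlib.h>

struct cmd { unsigned char *cmnd; };

static void uninit(struct cmd *c)
{
	unsigned char *tmp = c->cmnd;

	c->cmnd = NULL;	/* observers now see NULL, never stale memory */
	free(tmp);	/* mempool_free() in the kernel version */
}

int main(void)
{
	struct cmd c = { malloc(16) };

	uninit(&c);
	return 0;
}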
sector_size = old_sector_size;
goto got_data;
}
+ /* Remember that READ CAPACITY(16) succeeded */
+ sdp->try_rc_10_first = 0;
}
}
int res;
struct scsi_device *sdp = sdkp->device;
struct scsi_mode_data data;
+ int disk_ro = get_disk_ro(sdkp->disk);
int old_wp = sdkp->write_prot;
set_disk_ro(sdkp->disk, 0);
"Test WP failed, assume Write Enabled\n");
} else {
sdkp->write_prot = ((data.device_specific & 0x80) != 0);
- set_disk_ro(sdkp->disk, sdkp->write_prot);
+ set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
if (sdkp->first_scan || old_wp != sdkp->write_prot) {
sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
sdkp->write_prot ? "on" : "off");
return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}
-/**
- * sd_zbc_zone_no - Get the number of the zone conataining a sector.
- * @sdkp: The target disk
- * @sector: 512B sector address contained in the zone
- */
-static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
- sector_t sector)
-{
- return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
-}
-
/**
* sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
* @cmd: the command to setup
return BLKPREP_OK;
}
-/**
- * sd_zbc_write_lock_zone - Write lock a sequential zone.
- * @cmd: write command
- *
- * Called from sd_init_cmd() for write requests (standard write, write same or
- * write zeroes operations). If the request target zone is not already locked,
- * the zone is locked and BLKPREP_OK returned, allowing the request to proceed
- * through dispatch in scsi_request_fn(). Otherwise, BLKPREP_DEFER is returned,
- * forcing the request to wait for the zone to be unlocked, that is, for the
- * previously issued write request targeting the same zone to complete.
- *
- * This is called from blk_peek_request() context with the queue lock held and
- * before the request is removed from the scheduler. As a result, multiple
- * contexts executing concurrently scsi_request_fn() cannot result in write
- * sequence reordering as only a single write request per zone is allowed to
- * proceed.
- */
-int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- sector_t sector = blk_rq_pos(rq);
- sector_t zone_sectors = sd_zbc_zone_sectors(sdkp);
- unsigned int zno = sd_zbc_zone_no(sdkp, sector);
-
- /*
- * Note: Checks of the alignment of the write command on
- * logical blocks is done in sd.c
- */
-
- /* Do not allow zone boundaries crossing on host-managed drives */
- if (blk_queue_zoned_model(sdkp->disk->queue) == BLK_ZONED_HM &&
- (sector & (zone_sectors - 1)) + blk_rq_sectors(rq) > zone_sectors)
- return BLKPREP_KILL;
-
- /*
- * Do not issue more than one write at a time per
- * zone. This solves write ordering problems due to
- * the unlocking of the request queue in the dispatch
- * path in the non scsi-mq case.
- */
- if (sdkp->zones_wlock &&
- test_and_set_bit(zno, sdkp->zones_wlock))
- return BLKPREP_DEFER;
-
- WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK);
- cmd->flags |= SCMD_ZONE_WRITE_LOCK;
-
- return BLKPREP_OK;
-}
-
-/**
- * sd_zbc_write_unlock_zone - Write unlock a sequential zone.
- * @cmd: write command
- *
- * Called from sd_uninit_cmd(). Unlocking the request target zone will allow
- * dispatching the next write request for the zone.
- */
-void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
-{
- struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-
- if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) {
- unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
- WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
- cmd->flags &= ~SCMD_ZONE_WRITE_LOCK;
- clear_bit_unlock(zno, sdkp->zones_wlock);
- smp_mb__after_atomic();
- }
-}
-
/**
* sd_zbc_complete - ZBC command post processing.
* @cmd: Completed command
*/
static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
- u64 zone_blocks;
+ u64 zone_blocks = 0;
sector_t block = 0;
unsigned char *buf;
unsigned char *rec;
/* Do a report zone to get the same field */
ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
- if (ret) {
- zone_blocks = 0;
- goto out;
- }
+ if (ret)
+ goto out_free;
same = buf[4] & 0x0f;
if (same > 0) {
ret = sd_zbc_report_zones(sdkp, buf,
SD_ZBC_BUF_SIZE, block);
if (ret)
- return ret;
+ goto out_free;
}
} while (block < sdkp->capacity);
zone_blocks = sdkp->zone_blocks;
out:
- kfree(buf);
-
if (!zone_blocks) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Devices with non constant zone "
"size are not supported\n");
- return -ENODEV;
- }
-
- if (!is_power_of_2(zone_blocks)) {
+ ret = -ENODEV;
+ } else if (!is_power_of_2(zone_blocks)) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Devices with non power of 2 zone "
"size are not supported\n");
- return -ENODEV;
- }
-
- if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ ret = -ENODEV;
+ } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Zone size too large\n");
- return -ENODEV;
+ ret = -ENODEV;
+ } else {
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->zone_shift = ilog2(zone_blocks);
}
- sdkp->zone_blocks = zone_blocks;
- sdkp->zone_shift = ilog2(zone_blocks);
+ out_free:
+ kfree(buf);
- return 0;
+ return ret;
}
+/**
+ * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
+ * @sdkp: The disk of the bitmap
+ */
+static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+
+ return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
+ * sizeof(unsigned long),
+ GFP_KERNEL, q->node);
+}
+
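The helper sizes the allocation at one bit per zone, rounded up to whole unsigned longs, which is what BITS_TO_LONGS() computes. A runnable sketch of the arithmetic; the zone count is a made-up example of large-SMR-disk scale:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int nr_zones = 55880;	/* illustrative zone count */
	unsigned long *bitmap = calloc(BITS_TO_LONGS(nr_zones),
				       sizeof(unsigned long));

	printf("%zu bytes cover %u zones\n",
	       BITS_TO_LONGS(nr_zones) * sizeof(unsigned long), nr_zones);
	free(bitmap);
	return 0;
}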
+/**
+ * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
+ * @sdkp: disk used
+ * @buf: report reply buffer
+ * @buflen: length of @buf
+ * @seq_zones_bitmap: bitmap of sequential zones to set
+ *
+ * Parse reported zone descriptors in @buf to identify sequential zones and
+ * set the reported zone bit in @seq_zones_bitmap accordingly.
+ * Since read-only and offline zones cannot be written, do not
+ * mark them as sequential in the bitmap.
+ * Return the LBA after the last zone reported.
+ */
+static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ unsigned int buflen,
+ unsigned long *seq_zones_bitmap)
+{
+ sector_t lba, next_lba = sdkp->capacity;
+ unsigned int buf_len, list_length;
+ unsigned char *rec;
+ u8 type, cond;
+
+ list_length = get_unaligned_be32(&buf[0]) + 64;
+ buf_len = min(list_length, buflen);
+ rec = buf + 64;
+
+ while (rec < buf + buf_len) {
+ type = rec[0] & 0x0f;
+ cond = (rec[1] >> 4) & 0xf;
+ lba = get_unaligned_be64(&rec[16]);
+ if (type != ZBC_ZONE_TYPE_CONV &&
+ cond != ZBC_ZONE_COND_READONLY &&
+ cond != ZBC_ZONE_COND_OFFLINE)
+ set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
+ next_lba = lba + get_unaligned_be64(&rec[8]);
+ rec += 64;
+ }
+
+ return next_lba;
+}
+
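sd_zbc_get_seq_zones() walks 64-byte REPORT ZONES descriptors: zone type in the low nibble of byte 0, condition in the high nibble of byte 1, big-endian zone length at offset 8 and start LBA at offset 16. A runnable userspace sketch of that layout, with a local be64() standing in for get_unaligned_be64():

#include <stdint.h>
#include <stdio.h>

static uint64_t be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	uint8_t rec[64] = { 0x02 };	/* type 2: sequential write required */
	uint64_t len = 0x10000, start = 0x20000;

	for (int i = 0; i < 8; i++) {
		rec[8 + i]  = (uint8_t)(len >> (56 - 8 * i));
		rec[16 + i] = (uint8_t)(start >> (56 - 8 * i));
	}
	printf("type %u, start %#llx, next zone at %#llx\n",
	       (unsigned int)(rec[0] & 0x0f),
	       (unsigned long long)be64(&rec[16]),
	       (unsigned long long)(be64(&rec[16]) + be64(&rec[8])));
	return 0;
}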
+/**
+ * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
+ * @sdkp: target disk
+ *
+ * Allocate a zone bitmap and initialize it by identifying sequential zones.
+ */
+static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned long *seq_zones_bitmap;
+ sector_t lba = 0;
+ unsigned char *buf;
+ int ret = -ENOMEM;
+
+ seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
+ if (!seq_zones_bitmap)
+ return -ENOMEM;
+
+ buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ while (lba < sdkp->capacity) {
+ ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
+ if (ret)
+ goto out;
+ lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
+ seq_zones_bitmap);
+ }
+
+ if (lba != sdkp->capacity) {
+ /* Something went wrong */
+ ret = -EIO;
+ }
+
+out:
+ kfree(buf);
+ if (ret) {
+ kfree(seq_zones_bitmap);
+ return ret;
+ }
+
+ q->seq_zones_bitmap = seq_zones_bitmap;
+
+ return 0;
+}
+
+static void sd_zbc_cleanup(struct scsi_disk *sdkp)
+{
+ struct request_queue *q = sdkp->disk->queue;
+
+ kfree(q->seq_zones_bitmap);
+ q->seq_zones_bitmap = NULL;
+
+ kfree(q->seq_zones_wlock);
+ q->seq_zones_wlock = NULL;
+
+ q->nr_zones = 0;
+}
+
static int sd_zbc_setup(struct scsi_disk *sdkp)
{
+ struct request_queue *q = sdkp->disk->queue;
+ int ret;
/* READ16/WRITE16 is mandatory for ZBC disks */
sdkp->device->use_16_for_rw = 1;
sdkp->nr_zones =
round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
- if (!sdkp->zones_wlock) {
- sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!sdkp->zones_wlock)
- return -ENOMEM;
+ /*
+ * Initialize the device request queue information if the number
+ * of zones changed.
+ */
+ if (sdkp->nr_zones != q->nr_zones) {
+
+ sd_zbc_cleanup(sdkp);
+
+ q->nr_zones = sdkp->nr_zones;
+ if (sdkp->nr_zones) {
+ q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
+ if (!q->seq_zones_wlock) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
+ if (ret) {
+ sd_zbc_cleanup(sdkp);
+ goto err;
+ }
+ }
+
}
return 0;
+
+err:
+ sd_zbc_cleanup(sdkp);
+ return ret;
}
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
err:
sdkp->capacity = 0;
+ sd_zbc_cleanup(sdkp);
return ret;
}
void sd_zbc_remove(struct scsi_disk *sdkp)
{
- kfree(sdkp->zones_wlock);
- sdkp->zones_wlock = NULL;
+ sd_zbc_cleanup(sdkp);
}
void sd_zbc_print_zones(struct scsi_disk *sdkp)
case TEST_UNIT_READY:
break;
default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_ERROR);
}
break;
case SRB_STATUS_INVALID_LUN:
+ set_host_byte(scmnd, DID_NO_CONNECT);
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
*/
cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
cpumask_of_node(cpu_to_node(q_num)));
- for_each_cpu(tgt_cpu, &alloced_mask) {
+ for_each_cpu_wrap(tgt_cpu, &alloced_mask,
+ outgoing_channel->target_cpu + 1) {
if (tgt_cpu != outgoing_channel->target_cpu) {
outgoing_channel =
stor_device->stor_chns[tgt_cpu];
.eh_timed_out = storvsc_eh_timed_out,
.slave_alloc = storvsc_device_alloc,
.slave_configure = storvsc_device_configure,
- .cmd_per_lun = 255,
+ .cmd_per_lun = 2048,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
/* Make sure we dont get a sg segment crosses a page boundary */
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
+#include "ufs-sysfs.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define ufshcd_is_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
-static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
/**
* ufshcd_get_intr_mask - Get the interrupt bit mask
- * @hba - Pointer to adapter instance
+ * @hba: Pointer to adapter instance
*
* Returns interrupt bit mask per version
*/
/**
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
- * @hba - Pointer to adapter instance
+ * @hba: Pointer to adapter instance
*
* Returns UFSHCI version supported by the controller
*/
/**
* ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
- * @lrb: pointer to local command reference block
+ * @lrbp: pointer to local command reference block
*
* This function is used to get the OCS field from UTRD
* Returns the OCS field in the UTRD
? false : true;
}
-static const char *ufschd_uic_link_state_to_string(
- enum uic_link_state state)
-{
- switch (state) {
- case UIC_LINK_OFF_STATE: return "OFF";
- case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
- case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
- default: return "UNKNOWN";
- }
-}
-
-static const char *ufschd_ufs_dev_pwr_mode_to_string(
- enum ufs_dev_pwr_mode state)
-{
- switch (state) {
- case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
- case UFS_SLEEP_PWR_MODE: return "SLEEP";
- case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
- default: return "UNKNOWN";
- }
-}
-
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
/**
* ufshcd_copy_sense_data - Copy sense data in case of check condition
- * @lrb - pointer to local reference block
+ * @lrbp: pointer to local reference block
*/
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
* ufshcd_copy_query_response() - Copy the Query Response and the data
* descriptor
* @hba: per adapter instance
- * @lrb - pointer to local reference block
+ * @lrbp: pointer to local reference block
*/
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/**
* ufshcd_wait_for_uic_cmd - Wait for completion of UIC command
* @hba: per adapter instance
- * @uic_command: UIC command
+ * @uic_cmd: UIC command
*
* Must be called with mutex held.
* Returns 0 only if success.
/**
* ufshcd_map_sg - Map scatter-gather list to prdt
- * @lrbp - pointer to local reference block
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
*
* Returns 0 in case of success, non-zero value in case of failure
*/
/**
* ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
* for scsi commands
- * @lrbp - local reference block pointer
- * @upiu_flags - flags
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
*/
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
/**
* ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
* for Device Management Purposes
- * @hba - per adapter instance
- * @lrb - pointer to local reference block
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
*/
static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
/**
* ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
* for SCSI Purposes
- * @hba - per adapter instance
- * @lrb - pointer to local reference block
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
*/
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
return ret;
}
-/*
- * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
- * @scsi_lun: scsi LUN id
- *
- * Returns UPIU LUN id
- */
-static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
-{
- if (scsi_is_wlun(scsi_lun))
- return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
- | UFS_UPIU_WLUN_ID;
- else
- return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
-}
-
/**
* ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
- * @scsi_lun: UPIU W-LUN id
+ * @upiu_wlun_id: UPIU W-LUN id
*
* Returns SCSI W-LUN id
*/
/**
* ufshcd_queuecommand - main entry point for SCSI requests
+ * @host: SCSI host pointer
* @cmd: command from SCSI Midlayer
- * @done: call back function
*
* Returns 0 for success, non-zero in case of failure
*/
/**
* ufshcd_get_dev_cmd_tag - Get device management command tag
* @hba: per-adapter instance
- * @tag: pointer to variable with available slot value
+ * @tag_out: pointer to variable with available slot value
*
* Get a free slot and lock it until device management command
* completes.
/**
* ufshcd_exec_dev_cmd - API for sending device management requests
- * @hba - UFS hba
- * @cmd_type - specifies the type (NOP, Query...)
- * @timeout - time in seconds
+ * @hba: UFS hba
+ * @cmd_type: specifies the type (NOP, Query...)
+ * @timeout: time in seconds
*
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
/**
* ufshcd_query_flag() - API function for sending flag query requests
- * hba: per-adapter instance
- * query_opcode: flag query to perform
- * idn: flag idn to access
- * flag_res: the flag value after the query request completes
+ * @hba: per-adapter instance
+ * @opcode: flag query to perform
+ * @idn: flag idn to access
+ * @flag_res: the flag value after the query request completes
*
* Returns 0 for success, non-zero in case of failure
*/
/**
* ufshcd_query_attr - API function for sending attribute requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * attr_val: the attribute value after the query request completes
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request completes
*
* Returns 0 for success, non-zero in case of failure
*/
-static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
}
/**
- * ufshcd_query_descriptor_retry - API function for sending descriptor
- * requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * desc_buf: the buffer that contains the descriptor
- * buf_len: length parameter passed to the device
+ * ufshcd_query_descriptor_retry - API function for sending descriptor requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @desc_buf: the buffer that contains the descriptor
+ * @buf_len: length parameter passed to the device
*
* Returns 0 for success, non-zero in case of failure.
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*/
-static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
- enum query_opcode opcode,
- enum desc_idn idn, u8 index,
- u8 selector,
- u8 *desc_buf, int *buf_len)
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index,
+ u8 selector,
+ u8 *desc_buf, int *buf_len)
{
int err;
int retries;
case QUERY_DESC_IDN_STRING:
*desc_len = QUERY_DESC_MAX_SIZE;
break;
+ case QUERY_DESC_IDN_HEALTH:
+ *desc_len = hba->desc_size.hlth_desc;
+ break;
case QUERY_DESC_IDN_RFU_0:
case QUERY_DESC_IDN_RFU_1:
*desc_len = 0;
*
* Return 0 in case of success, non-zero otherwise
*/
-static int ufshcd_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- u8 param_offset,
- u8 *param_read_buf,
- u8 param_size)
+int ufshcd_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ u8 param_offset,
+ u8 *param_read_buf,
+ u8 param_size)
{
int ret;
u8 *desc_buf;
*
* Return 0 in case of success, non-zero otherwise
*/
-#define ASCII_STD true
-static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
- u8 *buf, u32 size, bool ascii)
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
+ u8 *buf, u32 size, bool ascii)
{
int err = 0;
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
- if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+ if (!ufs_is_valid_unit_desc_lun(lun))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
return ret;
}
+static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
/**
* ufshcd_init_pwr_info - setting the POR (power on reset)
* values in hba power info
/**
* ufshcd_complete_dev_init() - checks device readiness
- * hba: per-adapter instance
+ * @hba: per-adapter instance
*
* Set fDeviceInit flag and poll until device toggles it.
*/
/* REPORT SUPPORTED OPERATION CODES is not supported */
sdev->no_report_opcodes = 1;
+ /* WRITE_SAME command is not supported */
+ sdev->no_write_same = 1;
ufshcd_set_queue_depth(sdev);
/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
- * @lrb: pointer to local reference block of completed command
+ * @lrbp: pointer to local reference block of completed command
* @scsi_status: SCSI command status
*
* Returns value base on SCSI command status
/**
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
- * @lrb: pointer to local reference block of completed command
+ * @lrbp: pointer to local reference block of completed command
*
* Returns result of the command to notify SCSI midlayer
*/
/**
* ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
- * @cmd - SCSI command pointer
+ * @cmd: SCSI command pointer
*
* Returns SUCCESS/FAILED
*/
* will take effect only when its sent to "UFS device" well known logical unit
* hence we require the scsi_device instance to represent this logical unit in
* order for the UFS host driver to send the SSU command for power management.
-
+ *
* We also require the scsi_device instance for "RPMB" (Replay Protected Memory
* Block) LU so user space process can control this LU. User space may also
* want to have access to BOOT LU.
-
+ *
* This function adds scsi device instances for each of all well known LUs
* (except "REPORT LUNS" LU).
*
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
- QUERY_DESC_MAX_SIZE, ASCII_STD);
+ QUERY_DESC_MAX_SIZE, true/*ASCII*/);
if (err) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
&hba->desc_size.geom_desc);
if (err)
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
+ &hba->desc_size.hlth_desc);
+ if (err)
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+ hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
/**
/* UniPro link is active now */
ufshcd_set_link_active(hba);
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
}
+static const struct attribute_group *ufshcd_driver_groups[] = {
+ &ufs_sysfs_unit_descriptor_group,
+ &ufs_sysfs_lun_attributes_group,
+ NULL,
+};
+
static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
.can_queue = UFSHCD_CAN_QUEUE,
.max_host_blocked = 1,
.track_queue_depth = 1,
+ .sdev_groups = ufshcd_driver_groups,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
struct ufs_vreg *vreg, bool on)
{
int ret = 0;
- struct regulator *reg = vreg->reg;
- const char *name = vreg->name;
+ struct regulator *reg;
+ const char *name;
int min_uV, uA_load;
BUG_ON(!vreg);
+ reg = vreg->reg;
+ name = vreg->name;
+
if (regulator_count_voltages(reg) > 0) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
+
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
goto out;
set_old_link_state:
/**
* ufshcd_system_suspend - system suspend routine
* @hba: per adapter instance
- * @pm_op: runtime PM or system PM
*
* Check the description of ufshcd_suspend() function for more details.
*
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
-static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count,
- bool rpm)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags, value;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- if (value >= UFS_PM_LVL_MAX)
- return -EINVAL;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (rpm)
- hba->rpm_lvl = value;
- else
- hba->spm_lvl = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return count;
-}
-
-static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int curr_len;
- u8 lvl;
-
- curr_len = snprintf(buf, PAGE_SIZE,
- "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
- hba->rpm_lvl,
- ufschd_ufs_dev_pwr_mode_to_string(
- ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
- ufschd_uic_link_state_to_string(
- ufs_pm_lvl_states[hba->rpm_lvl].link_state));
-
- curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
- "\nAll available Runtime PM levels info:\n");
- for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
- curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
- "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
- lvl,
- ufschd_ufs_dev_pwr_mode_to_string(
- ufs_pm_lvl_states[lvl].dev_state),
- ufschd_uic_link_state_to_string(
- ufs_pm_lvl_states[lvl].link_state));
-
- return curr_len;
-}
-
-static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
-}
-
-static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
-{
- hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
- hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
- sysfs_attr_init(&hba->rpm_lvl_attr.attr);
- hba->rpm_lvl_attr.attr.name = "rpm_lvl";
- hba->rpm_lvl_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
- dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
-}
-
-static ssize_t ufshcd_spm_lvl_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int curr_len;
- u8 lvl;
-
- curr_len = snprintf(buf, PAGE_SIZE,
- "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
- hba->spm_lvl,
- ufschd_ufs_dev_pwr_mode_to_string(
- ufs_pm_lvl_states[hba->spm_lvl].dev_state),
- ufschd_uic_link_state_to_string(
- ufs_pm_lvl_states[hba->spm_lvl].link_state));
-
- curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
- "\nAll available System PM levels info:\n");
- for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
- curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
- "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
- lvl,
- ufschd_ufs_dev_pwr_mode_to_string(
- ufs_pm_lvl_states[lvl].dev_state),
- ufschd_uic_link_state_to_string(
- ufs_pm_lvl_states[lvl].link_state));
-
- return curr_len;
-}
-
-static ssize_t ufshcd_spm_lvl_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
-}
-
-static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
-{
- hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
- hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
- sysfs_attr_init(&hba->spm_lvl_attr.attr);
- hba->spm_lvl_attr.attr.name = "spm_lvl";
- hba->spm_lvl_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->spm_lvl_attr))
- dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
-}
-
-static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
-{
- ufshcd_add_rpm_lvl_sysfs_nodes(hba);
- ufshcd_add_spm_lvl_sysfs_nodes(hba);
-}
-
-static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
-{
- device_remove_file(hba->dev, &hba->rpm_lvl_attr);
- device_remove_file(hba->dev, &hba->spm_lvl_attr);
-}
-
/**
* ufshcd_shutdown - shutdown routine
* @hba: per adapter instance
/**
* ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
- * @hba - per adapter instance
+ * @hba: per adapter instance
*/
void ufshcd_remove(struct ufs_hba *hba)
{
- ufshcd_remove_sysfs_nodes(hba);
+ ufs_sysfs_remove_nodes(hba->dev);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ /* Set the default auto-hibernate idle timer value to 150 ms */
+ if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
+ }
+
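FIELD_PREP() from linux/bitfield.h (included above) shifts a value into the field selected by a mask. With the AHIT layout these masks describe (timer in bits 9:0, scale in bits 12:10, scale 3 selecting 1 ms units), (150, 3) encodes the 150 ms default. A runnable sketch with the masks written out as plain constants; field_prep() mimics the macro for this simple case:

#include <stdint.h>
#include <stdio.h>

#define TIMER_MASK 0x03ffu	/* UFSHCI_AHIBERN8_TIMER_MASK, bits 9:0 */
#define SCALE_MASK 0x1c00u	/* UFSHCI_AHIBERN8_SCALE_MASK, bits 12:10 */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t ahit = field_prep(TIMER_MASK, 150) | field_prep(SCALE_MASK, 3);

	printf("AHIT = %#x\n", ahit);	/* 0xc96: 150 in 1 ms units */
	return 0;
}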
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
ufshcd_set_ufs_dev_active(hba);
async_schedule(ufshcd_async_scan, hba);
- ufshcd_add_sysfs_nodes(hba);
+ ufs_sysfs_add_nodes(hba->dev);
return 0;
/* for scmd->flags */
#define SCMD_TAGGED (1 << 0)
#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
-#define SCMD_ZONE_WRITE_LOCK (1 << 2)
-#define SCMD_INITIALIZED (1 << 3)
+#define SCMD_INITIALIZED (1 << 2)
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
struct list_head list; /* scsi_cmnd participates in queue lists */
struct list_head eh_entry; /* entry for the host eh_cmd_q */
struct delayed_work abort_work;
+
+ struct rcu_head rcu;
+
int eh_eflags; /* Used by error handlr */
/*
struct module *module;
const char *name;
- /*
- * Used to initialize old-style drivers. For new-style drivers
- * just perform all work in your module initialization function.
- *
- * Status: OBSOLETE
- */
- int (* detect)(struct scsi_host_template *);
-
- /*
- * Used as unload callback for hosts with old-style drivers.
- *
- * Status: OBSOLETE
- */
- int (* release)(struct Scsi_Host *);
-
/*
* The info function will return whatever useful information the
* developer sees fit. If not provided, then the name field will
/* True if the controller does not support WRITE SAME */
unsigned no_write_same:1;
+ /* True if the low-level driver supports blk-mq only */
+ unsigned force_blk_mq:1;
+
/*
* Countdown for host blocking with no commands outstanding.
*/
struct device_attribute **sdev_attrs;
/*
- * List of hosts per template.
- *
- * This is only for use by scsi_module.c for legacy templates.
- * For these access to it is synchronized implicitly by
- * module_init/module_exit.
+ * Pointer to the SCSI device attribute groups for this host,
+ * NULL terminated.
*/
- struct list_head legacy_hosts;
+ const struct attribute_group **sdev_groups;
/*
* Vendor Identifier associated with the host
struct blk_mq_tag_set tag_set;
};
- struct rcu_head rcu;
-
atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
/* ldm bits */
struct device shost_gendev, shost_dev;
- /*
- * List of hosts per template.
- *
- * This is only for use by scsi_module.c for legacy templates.
- * For these access to it is synchronized implicitly by
- * module_init/module_exit.
- */
- struct list_head sht_legacy_list;
-
/*
* Points to the transport data (if any) which is allocated
* separately
return shost->prot_guard_type;
}
-/* legacy interfaces */
-extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
-extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
#endif /* _SCSI_SCSI_HOST_H */