rc = -ENODEV;
goto unlock;
}
- if (dev->class != ATA_DEV_ATA) {
+ if (dev->class != ATA_DEV_ATA &&
+ dev->class != ATA_DEV_ZAC) {
rc = -EOPNOTSUPP;
goto unlock;
}
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
depth = min(ATA_MAX_QUEUE - 1, depth);
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
+ scsi_change_queue_depth(sdev, depth);
}
blk_queue_flush_queueable(q, false);
 * @ap: ATA port to which the device is attached
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
- * @reason: calling context
*
 * libsas and libata have different approaches for associating an sdev to
* its ata_port.
*
*/
int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth, int reason)
+ int queue_depth)
{
struct ata_device *dev;
unsigned long flags;
- if (reason != SCSI_QDEPTH_DEFAULT)
- return -EOPNOTSUPP;
-
if (queue_depth < 1 || queue_depth == sdev->queue_depth)
return sdev->queue_depth;
if (sdev->queue_depth == queue_depth)
return -EINVAL;
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
- return queue_depth;
+ return scsi_change_queue_depth(sdev, queue_depth);
}
/**
* ata_scsi_change_queue_depth - SCSI callback for queue depth config
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
- * @reason: calling context
*
* This is libata standard hostt->change_queue_depth callback.
 * SCSI will call into this callback when the user tries to set the queue
 * depth via sysfs.
* RETURNS:
* Newly configured queue depth.
*/
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
- int reason)
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
+ return __ata_change_queue_depth(ap, sdev, queue_depth);
}
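With the reason argument gone, the prototype SCSI hosts see is simply (sdev, queue_depth). Below is a minimal, hedged sketch of how a libata-based host template would reference the callback after this change; the template and driver names are hypothetical and not part of the patch:

#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

/* Illustrative only: wiring the two-argument callback into a host template. */
static struct scsi_host_template example_ata_sht = {
	.module			= THIS_MODULE,
	.name			= "example-ata-host",		/* hypothetical driver name */
	.change_queue_depth	= ata_scsi_change_queue_depth,	/* now (sdev, depth) */
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
};

Users continue to drive this path through the usual sysfs knob (writing a value to /sys/block/sdX/device/queue_depth); only the in-kernel plumbing changes.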
/**
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
const u8 versions[] = {
+ 0x00,
0x60, /* SAM-3 (no version claimed) */
0x03,
0x20, /* SBC-2 (no version claimed) */
0x02,
0x60 /* SPC-3 (no version claimed) */
};
+ const u8 versions_zbc[] = {
+ 0x00,
+ 0xA0, /* SAM-5 (no version claimed) */
+
+ 0x04,
+ 0xC0, /* SBC-3 (no version claimed) */
+
+ 0x04,
+ 0x60, /* SPC-4 (no version claimed) */
+
+ 0x60,
+ 0x20, /* ZBC (no version claimed) */
+ };
+
u8 hdr[] = {
TYPE_DISK,
0,
if (ata_id_removeable(args->id))
hdr[1] |= (1 << 7);
+ if (args->dev->class == ATA_DEV_ZAC) {
+ hdr[0] = TYPE_ZBC;
+ hdr[2] = 0x6; /* ZBC is defined in SPC-4 */
+ }
+
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
- memcpy(rbuf + 59, versions, sizeof(versions));
+ if (args->dev->class == ATA_DEV_ZAC)
+ memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
+ else
+ memcpy(rbuf + 58, versions, sizeof(versions));
return 0;
}
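Standard INQUIRY version descriptors are 16-bit codes occupying bytes 58 and up of the response, which is why the copy offset above moves from 59 to 58 and both tables gain an explicit leading byte. For reference, a hedged decode of the ZBC table into the code points it advertises (values taken directly from the bytes above; the array name is illustrative):

#include <linux/types.h>

static const u16 example_zbc_version_descriptors[] = {
	0x00a0,	/* SAM-5 (no version claimed) */
	0x04c0,	/* SBC-3 (no version claimed) */
	0x0460,	/* SPC-4 (no version claimed) */
	0x6020,	/* ZBC   (no version claimed) */
};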
DPRINTK("ATAPI request sense\n");
- /* FIXME: is this needed? */
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
#ifdef CONFIG_ATA_SFF
ata_xlat_func_t xlat_func;
int rc = 0;
- if (dev->class == ATA_DEV_ATA) {
+ if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
goto bad_cdb_len;
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
break;
- case SERVICE_ACTION_IN:
+ case SERVICE_ACTION_IN_16:
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
else
if (unlikely(task->ata_task.device_control_reg_update))
scb->header.opcode = CONTROL_ATA_DEV;
- else if (dev->sata_dev.command_set == ATA_COMMAND_SET)
- scb->header.opcode = INITIATE_ATA_TASK;
- else
+ else if (dev->sata_dev.class == ATA_DEV_ATAPI)
scb->header.opcode = INITIATE_ATAPI_TASK;
+ else
+ scb->header.opcode = INITIATE_ATA_TASK;
scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
if (dev->port->oob_mode == SAS_OOB_MODE)
if (likely(!task->ata_task.device_control_reg_update))
scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+ if (dev->sata_dev.class == ATA_DEV_ATAPI)
memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
16);
scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
if (task->ata_task.dma_xfer)
flags |= DATA_XFER_MODE_DMA;
if (task->ata_task.use_ncq &&
- dev->sata_dev.command_set != ATAPI_COMMAND_SET)
+ dev->sata_dev.class != ATA_DEV_ATAPI)
flags |= ATA_Q_TYPE_NCQ;
flags |= data_dir_flags[task->data_dir];
scb->ata_task.ata_flags = flags;
return res;
}
-int asd_execute_task(struct sas_task *task, const int num,
- gfp_t gfp_flags)
+int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
{
int res = 0;
LIST_HEAD(alist);
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
unsigned long flags;
- res = asd_can_queue(asd_ha, num);
+ res = asd_can_queue(asd_ha, 1);
if (res)
return res;
- res = num;
+ res = 1;
ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
if (res) {
res = -ENOMEM;
list_for_each_entry(a, &alist, list) {
a->uldd_task = t;
t->lldd_task = a;
- t = list_entry(t->list.next, struct sas_task, list);
+ break;
}
list_for_each_entry(a, &alist, list) {
t = a->uldd_task;
}
list_del_init(&alist);
- res = asd_post_ascb_list(asd_ha, ascb, num);
+ res = asd_post_ascb_list(asd_ha, ascb, 1);
if (unlikely(res)) {
a = NULL;
__list_add(&alist, ascb->list.prev, &ascb->list);
out_err:
if (ascb)
asd_ascb_free_list(ascb);
- asd_can_dequeue(asd_ha, num);
+ asd_can_dequeue(asd_ha, 1);
return res;
}
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
- dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
+ dev->sata_dev.class == ATA_DEV_ATAPI))) {
memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
spin_unlock_irqrestore(ap->lock, flags);
qc_already_gone:
- list_del_init(&task->list);
sas_free_task(task);
}
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, task);
- if (sas_ha->lldd_max_execute_num < 2)
- ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
- else
- ret = sas_queue_up(task);
-
- /* Examine */
+ ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
if (ret) {
SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
return to_sas_internal(dev->port->ha->core.shost->transportt);
}
- static void sas_get_ata_command_set(struct domain_device *dev);
+ static int sas_get_ata_command_set(struct domain_device *dev);
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
}
memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
sizeof(struct dev_to_host_fis));
- /* TODO switch to ata_dev_classify() */
- sas_get_ata_command_set(dev);
+ dev->sata_dev.class = sas_get_ata_command_set(dev);
}
return 0;
}
if (ret && ret != -EAGAIN)
sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
- /* XXX: if the class changes during the reset the upper layer
- * should be informed, if the device has gone away we assume
- * libsas will eventually delete it
- */
- switch (dev->sata_dev.command_set) {
- case ATA_COMMAND_SET:
- *class = ATA_DEV_ATA;
- break;
- case ATAPI_COMMAND_SET:
- *class = ATA_DEV_ATAPI;
- break;
- }
+ *class = dev->sata_dev.class;
ap->cbl = ATA_CBL_SATA;
return ret;
return;
out:
- list_del_init(&task->list);
sas_free_task(task);
}
complete(waiting);
}
- static void sas_get_ata_command_set(struct domain_device *dev)
+ static int sas_get_ata_command_set(struct domain_device *dev)
{
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
+ struct ata_taskfile tf;
if (dev->dev_type == SAS_SATA_PENDING)
- return;
+ return ATA_DEV_UNKNOWN;
+
+ ata_tf_from_fis((const u8 *)fis, &tf);
- if ((fis->sector_count == 1 && /* ATA */
- fis->lbal == 1 &&
- fis->lbam == 0 &&
- fis->lbah == 0 &&
- fis->device == 0)
- ||
- (fis->sector_count == 0 && /* CE-ATA (mATA) */
- fis->lbal == 0 &&
- fis->lbam == 0xCE &&
- fis->lbah == 0xAA &&
- (fis->device & ~0x10) == 0))
-
- dev->sata_dev.command_set = ATA_COMMAND_SET;
-
- else if ((fis->interrupt_reason == 1 && /* ATAPI */
- fis->lbal == 1 &&
- fis->byte_count_low == 0x14 &&
- fis->byte_count_high == 0xEB &&
- (fis->device & ~0x10) == 0))
-
- dev->sata_dev.command_set = ATAPI_COMMAND_SET;
-
- else if ((fis->sector_count == 1 && /* SEMB */
- fis->lbal == 1 &&
- fis->lbam == 0x3C &&
- fis->lbah == 0xC3 &&
- fis->device == 0)
- ||
- (fis->interrupt_reason == 1 && /* SATA PM */
- fis->lbal == 1 &&
- fis->byte_count_low == 0x69 &&
- fis->byte_count_high == 0x96 &&
- (fis->device & ~0x10) == 0))
-
- /* Treat it as a superset? */
- dev->sata_dev.command_set = ATAPI_COMMAND_SET;
+ return ata_dev_classify(&tf);
}
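ata_dev_classify() keys on the signature bytes of the D2H FIS, so the open-coded matching above is no longer needed and ZAC drives are reported as ATA_DEV_ZAC with no extra work in libsas. As a reference, a hedged illustration of the signature decode (values per the SATA and ZAC specifications; the helper below is illustrative, not the kernel function):

#include <linux/libata.h>

static unsigned int example_classify_signature(u8 lbam, u8 lbah)
{
	if (lbam == 0x00 && lbah == 0x00)
		return ATA_DEV_ATA;	/* plain ATA device */
	if (lbam == 0x14 && lbah == 0xeb)
		return ATA_DEV_ATAPI;	/* packet (ATAPI) device */
	if (lbam == 0x69 && lbah == 0x96)
		return ATA_DEV_PMP;	/* port multiplier */
	if (lbam == 0x3c && lbah == 0xc3)
		return ATA_DEV_SEMB;	/* SEMB device */
	if (lbam == 0xcd && lbah == 0xab)
		return ATA_DEV_ZAC;	/* host-managed zoned device */
	return ATA_DEV_UNKNOWN;
}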
void sas_probe_sata(struct asd_sas_port *port)
if (dev->dev_type == SAS_SATA_PM)
return -ENODEV;
- sas_get_ata_command_set(dev);
+ dev->sata_dev.class = sas_get_ata_command_set(dev);
sas_fill_in_rphy(dev, dev->rphy);
res = sas_notify_lldd_dev_found(dev);
if (task->ata_task.use_ncq)
flags |= MCH_FPDMA;
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
+ if (dev->sata_dev.class == ATA_DEV_ATAPI) {
if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
flags |= MCH_ATAPI;
}
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
/* fill in command FIS and ATAPI CDB */
memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
- if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+ if (dev->sata_dev.class == ATA_DEV_ATAPI)
memcpy(buf_cmd + STP_ATAPI_CMD,
task->ata_task.atapi_packet, 16);
return rc;
}
-static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
-{
- struct mvs_task_list *first = NULL;
-
- for (; *num > 0; --*num) {
- struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
-
- if (!mvs_list)
- break;
-
- INIT_LIST_HEAD(&mvs_list->list);
- if (!first)
- first = mvs_list;
- else
- list_add_tail(&mvs_list->list, &first->list);
-
- }
-
- return first;
-}
-
-static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
-{
- LIST_HEAD(list);
- struct list_head *pos, *a;
- struct mvs_task_list *mlist = NULL;
-
- __list_add(&list, mvs_list->list.prev, &mvs_list->list);
-
- list_for_each_safe(pos, a, &list) {
- list_del_init(pos);
- mlist = list_entry(pos, struct mvs_task_list, list);
- kmem_cache_free(mvs_task_list_cache, mlist);
- }
-}
-
-static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
+static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
struct completion *completion, int is_tmf,
struct mvs_tmf_task *tmf)
{
return rc;
}
-static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
- struct completion *completion, int is_tmf,
- struct mvs_tmf_task *tmf)
-{
- struct domain_device *dev = task->dev;
- struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
- struct mvs_info *mvi = NULL;
- struct sas_task *t = task;
- struct mvs_task_list *mvs_list = NULL, *a;
- LIST_HEAD(q);
- int pass[2] = {0};
- u32 rc = 0;
- u32 n = num;
- unsigned long flags = 0;
-
- mvs_list = mvs_task_alloc_list(&n, gfp_flags);
- if (n) {
- printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
- rc = -ENOMEM;
- goto free_list;
- }
-
- __list_add(&q, mvs_list->list.prev, &mvs_list->list);
-
- list_for_each_entry(a, &q, list) {
- a->task = t;
- t = list_entry(t->list.next, struct sas_task, list);
- }
-
- list_for_each_entry(a, &q , list) {
-
- t = a->task;
- mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
-
- spin_lock_irqsave(&mvi->lock, flags);
- rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
- if (rc)
- dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
- spin_unlock_irqrestore(&mvi->lock, flags);
- }
-
- if (likely(pass[0]))
- MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
- (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
-
- if (likely(pass[1]))
- MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
- (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
-
- list_del_init(&q);
-
-free_list:
- if (mvs_list)
- mvs_task_free_list(mvs_list);
-
- return rc;
-}
-
-int mvs_queue_command(struct sas_task *task, const int num,
- gfp_t gfp_flags)
+int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
- struct mvs_device *mvi_dev = task->dev->lldd_dev;
- struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
-
- if (sas->lldd_max_execute_num < 2)
- return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
- else
- return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
+ return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
}
static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
- res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
+ res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
if (res) {
del_timer(&task->slow_task->timer);
ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */
ATA_DEV_SEMB = 7, /* SEMB */
ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */
- ATA_DEV_NONE = 9, /* no device */
+ ATA_DEV_ZAC = 9, /* ZAC device */
+ ATA_DEV_NONE = 10, /* no device */
/* struct ata_link flags */
ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
- int queue_depth, int reason);
+ int queue_depth);
extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth, int reason);
+ int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
static inline unsigned int ata_class_enabled(unsigned int class)
{
return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
- class == ATA_DEV_PMP || class == ATA_DEV_SEMB;
+ class == ATA_DEV_PMP || class == ATA_DEV_SEMB ||
+ class == ATA_DEV_ZAC;
}
static inline unsigned int ata_class_disabled(unsigned int class)
};
/* ---------- SATA device ---------- */
- enum ata_command_set {
- ATA_COMMAND_SET = 0,
- ATAPI_COMMAND_SET = 1,
- };
-
#define ATA_RESP_FIS_SIZE 24
struct sata_device {
- enum ata_command_set command_set;
- struct smp_resp rps_resp; /* report_phy_sata_resp */
- u8 port_no; /* port number, if this is a PM (Port) */
+ unsigned int class;
+ struct smp_resp rps_resp; /* report_phy_sata_resp */
+ u8 port_no; /* port number, if this is a PM (Port) */
struct ata_port *ap;
struct ata_host ata_host;
struct scsi_core {
struct Scsi_Host *shost;
- struct mutex task_queue_flush;
- spinlock_t task_queue_lock;
- struct list_head task_queue;
- int task_queue_size;
-
- struct task_struct *queue_thread;
};
struct sas_ha_event {
struct asd_sas_port **sas_port; /* array of valid pointers, must be set */
int num_phys; /* must be set, gt 0, static */
- /* The class calls this to send a task for execution. */
- int lldd_max_execute_num;
- int lldd_queue_size;
int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
* their siblings when forming wide ports */
struct sas_task {
struct domain_device *dev;
- struct list_head list;
spinlock_t task_state_lock;
unsigned task_state_flags;
int (*lldd_dev_found)(struct domain_device *);
void (*lldd_dev_gone)(struct domain_device *);
- int (*lldd_execute_task)(struct sas_task *, int num,
- gfp_t gfp_flags);
+ int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags);
/* Task Management Functions. Must be called from process context. */
int (*lldd_abort_task)(struct sas_task *);
int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
-int sas_queue_up(struct sas_task *task);
extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
extern int sas_slave_configure(struct scsi_device *);
-extern int sas_change_queue_depth(struct scsi_device *, int new_depth,
- int reason);
+extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
extern int sas_change_queue_type(struct scsi_device *, int qt);
extern int sas_bios_param(struct scsi_device *,
struct block_device *,