* the host adapter emulator.
*/
-//#define DEBUG_SCSI
-
-#ifdef DEBUG_SCSI
-#define DPRINTF(fmt, ...) \
-do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) do {} while(0)
-#endif
-
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
+#include "qemu/module.h"
#include "hw/scsi/scsi.h"
+#include "migration/qemu-file-types.h"
+#include "migration/vmstate.h"
+#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"
+#include "trace.h"
#ifdef __linux
#include <scsi/sg.h>
DMAIOFunc *dma_readv;
DMAIOFunc *dma_writev;
bool (*need_fua_emulation)(SCSICommand *cmd);
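+ /* Hook for passthrough subclasses (scsi-block) to refresh the request's sense data before completion. */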
+ void (*update_sense)(SCSIRequest *r);
} SCSIDiskClass;
typedef struct SCSIDiskReq {
char *serial;
char *vendor;
char *product;
+ char *device_id;
bool tray_open;
bool tray_locked;
/*
/* Helper function for command completion with sense. */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
- DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
- r->req.tag, sense.key, sense.asc, sense.ascq);
+ trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
+ sense.ascq);
scsi_req_build_sense(&r->req, sense);
scsi_req_complete(&r->req, CHECK_CONDITION);
}
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
-static void scsi_read_complete(void * opaque, int ret)
+static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
- SCSIDiskReq *r = (SCSIDiskReq *)opaque;
- SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- int n;
+ uint32_t n;
- assert(r->req.aiocb != NULL);
- r->req.aiocb = NULL;
- aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
- if (scsi_disk_req_check_error(r, ret, true)) {
+ assert(r->req.aiocb == NULL);
+ if (scsi_disk_req_check_error(r, ret, false)) {
goto done;
}
- block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
- DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);
-
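+ /* r->sector and r->sector_count are counted in 512-byte units. */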
n = r->qiov.size / 512;
r->sector += n;
r->sector_count -= n;
done:
scsi_req_unref(&r->req);
+}
+
+static void scsi_read_complete(void *opaque, int ret)
+{
+ SCSIDiskReq *r = (SCSIDiskReq *)opaque;
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
+
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
+
+ aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
+ if (ret < 0) {
+ block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ } else {
+ block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
+ trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
+ }
+ scsi_read_complete_noio(r, ret);
aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
bool first;
- DPRINTF("Read sector_count=%d\n", r->sector_count);
+ trace_scsi_disk_read_data_count(r->sector_count);
if (r->sector_count == 0) {
/* This also clears the sense buffer for REQUEST SENSE. */
scsi_req_complete(&r->req, GOOD);
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
- DPRINTF("Data transfer direction invalid\n");
- scsi_read_complete(r, -EINVAL);
+ trace_scsi_disk_read_data_invalid();
+ scsi_read_complete_noio(r, -EINVAL);
return;
}
if (!blk_is_available(req->dev->conf.blk)) {
- scsi_read_complete(r, -ENOMEDIUM);
+ scsi_read_complete_noio(r, -ENOMEDIUM);
return;
}
{
bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
+ SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
is_read, error);
}
switch (error) {
case 0:
- /* The command has run, no need to fake sense. */
+ /* A passthrough command has run and has produced sense data; check
+ * whether the error has to be handled by the guest or should rather
+ * pause the host.
+ */
assert(r->status && *r->status);
- scsi_req_complete(&r->req, *r->status);
+ if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
+ /* These errors are handled by the guest. */
+ sdc->update_sense(&r->req);
+ scsi_req_complete(&r->req, *r->status);
+ return true;
+ }
+ error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
break;
case ENOMEDIUM:
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
break;
}
}
- if (!error) {
- assert(r->status && *r->status);
- error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
- if (error == ECANCELED || error == EAGAIN || error == ENOTCONN ||
- error == 0) {
- /* These errors are handled by guest. */
- scsi_req_complete(&r->req, *r->status);
- return true;
- }
+ blk_error_action(s->qdev.conf.blk, action, is_read, error);
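+ /* For rerror/werror=ignore, complete the request successfully. */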
+ if (action == BLOCK_ERROR_ACTION_IGNORE) {
+ scsi_req_complete(&r->req, 0);
+ return true;
}
- blk_error_action(s->qdev.conf.blk, action, is_read, error);
if (action == BLOCK_ERROR_ACTION_STOP) {
scsi_req_retry(&r->req);
}
- return action != BLOCK_ERROR_ACTION_IGNORE;
+ return true;
}
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
return;
} else {
scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
- DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
+ trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
scsi_req_data(&r->req, r->qiov.size);
}
/* The request is used as the AIO opaque value, so add a ref. */
scsi_req_ref(&r->req);
if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
- DPRINTF("Data transfer direction invalid\n");
+ trace_scsi_disk_write_data_invalid();
scsi_write_complete_noio(r, -EINVAL);
return;
}
return (uint8_t *)r->iov.iov_base;
}
-int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
+static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
uint8_t page_code = req->cmd.buf[2];
switch (page_code) {
case 0x00: /* Supported page codes, mandatory */
{
- DPRINTF("Inquiry EVPD[Supported pages] "
- "buffer size %zd\n", req->cmd.xfer);
+ trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
if (s->serial) {
outbuf[buflen++] = 0x80; /* unit serial number */
int l;
if (!s->serial) {
- DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
+ trace_scsi_disk_emulate_vpd_page_80_not_supported();
return -1;
}
l = 36;
}
- DPRINTF("Inquiry EVPD[Serial number] "
- "buffer size %zd\n", req->cmd.xfer);
+ trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
memcpy(outbuf + buflen, s->serial, l);
buflen += l;
break;
case 0x83: /* Device identification page, mandatory */
{
- const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
- int max_len = s->serial ? 20 : 255 - 8;
- int id_len = strlen(str);
+ int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;
- if (id_len > max_len) {
- id_len = max_len;
- }
- DPRINTF("Inquiry EVPD[Device identification] "
- "buffer size %zd\n", req->cmd.xfer);
+ trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);
- outbuf[buflen++] = 0x2; /* ASCII */
- outbuf[buflen++] = 0; /* not officially assigned */
- outbuf[buflen++] = 0; /* reserved */
- outbuf[buflen++] = id_len; /* length of data following */
- memcpy(outbuf + buflen, str, id_len);
- buflen += id_len;
+ if (id_len) {
+ outbuf[buflen++] = 0x2; /* ASCII */
+ outbuf[buflen++] = 0; /* not officially assigned */
+ outbuf[buflen++] = 0; /* reserved */
+ outbuf[buflen++] = id_len; /* length of data following */
+ memcpy(outbuf + buflen, s->device_id, id_len);
+ buflen += id_len;
+ }
if (s->qdev.wwn) {
outbuf[buflen++] = 0x1; /* Binary */
}
case 0xb0: /* block limits */
{
- unsigned int unmap_sectors =
+ SCSIBlockLimits bl = {};
+
+ if (s->qdev.type == TYPE_ROM) {
+ trace_scsi_disk_emulate_vpd_page_b0_not_supported();
+ return -1;
+ }
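+ /* WSNZ: WRITE SAME with a zero NUMBER OF LOGICAL BLOCKS is not supported. */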
+ bl.wsnz = 1;
+ bl.unmap_sectors =
s->qdev.conf.discard_granularity / s->qdev.blocksize;
- unsigned int min_io_size =
+ bl.min_io_size =
s->qdev.conf.min_io_size / s->qdev.blocksize;
- unsigned int opt_io_size =
+ bl.opt_io_size =
s->qdev.conf.opt_io_size / s->qdev.blocksize;
- unsigned int max_unmap_sectors =
+ bl.max_unmap_sectors =
s->max_unmap_size / s->qdev.blocksize;
- unsigned int max_io_sectors =
+ bl.max_io_sectors =
s->max_io_size / s->qdev.blocksize;
+ /* 255 descriptors fit in 4 KiB with an 8-byte header */
+ bl.max_unmap_descr = 255;
- if (s->qdev.type == TYPE_ROM) {
- DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
- page_code);
- return -1;
- }
if (s->qdev.type == TYPE_DISK) {
int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
int max_io_sectors_blk =
max_transfer_blk / s->qdev.blocksize;
- max_io_sectors =
- MIN_NON_ZERO(max_io_sectors_blk, max_io_sectors);
-
- /* min_io_size and opt_io_size can't be greater than
- * max_io_sectors */
- if (min_io_size) {
- min_io_size = MIN(min_io_size, max_io_sectors);
- }
- if (opt_io_size) {
- opt_io_size = MIN(opt_io_size, max_io_sectors);
- }
+ bl.max_io_sectors =
+ MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
}
- /* required VPD size with unmap support */
- buflen = 0x40;
- memset(outbuf + 4, 0, buflen - 4);
-
- outbuf[4] = 0x1; /* wsnz */
-
- /* optimal transfer length granularity */
- outbuf[6] = (min_io_size >> 8) & 0xff;
- outbuf[7] = min_io_size & 0xff;
-
- /* maximum transfer length */
- outbuf[8] = (max_io_sectors >> 24) & 0xff;
- outbuf[9] = (max_io_sectors >> 16) & 0xff;
- outbuf[10] = (max_io_sectors >> 8) & 0xff;
- outbuf[11] = max_io_sectors & 0xff;
-
- /* optimal transfer length */
- outbuf[12] = (opt_io_size >> 24) & 0xff;
- outbuf[13] = (opt_io_size >> 16) & 0xff;
- outbuf[14] = (opt_io_size >> 8) & 0xff;
- outbuf[15] = opt_io_size & 0xff;
-
- /* max unmap LBA count, default is 1GB */
- outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
- outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
- outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
- outbuf[23] = max_unmap_sectors & 0xff;
-
- /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header */
- outbuf[24] = 0;
- outbuf[25] = 0;
- outbuf[26] = 0;
- outbuf[27] = 255;
-
- /* optimal unmap granularity */
- outbuf[28] = (unmap_sectors >> 24) & 0xff;
- outbuf[29] = (unmap_sectors >> 16) & 0xff;
- outbuf[30] = (unmap_sectors >> 8) & 0xff;
- outbuf[31] = unmap_sectors & 0xff;
-
- /* max write same size */
- outbuf[36] = 0;
- outbuf[37] = 0;
- outbuf[38] = 0;
- outbuf[39] = 0;
-
- outbuf[40] = (max_io_sectors >> 24) & 0xff;
- outbuf[41] = (max_io_sectors >> 16) & 0xff;
- outbuf[42] = (max_io_sectors >> 8) & 0xff;
- outbuf[43] = max_io_sectors & 0xff;
+ buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
break;
}
case 0xb1: /* block device characteristics */
dbd = (r->req.cmd.buf[1] & 0x8) != 0;
page = r->req.cmd.buf[2] & 0x3f;
page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
- DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
- (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control);
+
+ trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
+ 10, page, r->req.cmd.xfer, page_control);
memset(outbuf, 0, r->req.cmd.xfer);
p = outbuf;
format = req->cmd.buf[2] & 0xf;
start_track = req->cmd.buf[6];
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
- DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
+ trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
nb_sectors /= s->qdev.blocksize / 512;
switch (format) {
case 0:
int buflen = r->iov.iov_len;
if (buflen) {
- DPRINTF("Read buf_len=%d\n", buflen);
+ trace_scsi_disk_emulate_read_data(buflen);
r->iov.iov_len = 0;
r->started = true;
scsi_req_data(&r->req, buflen);
if (r->iov.iov_len) {
int buflen = r->iov.iov_len;
- DPRINTF("Write buf_len=%d\n", buflen);
+ trace_scsi_disk_emulate_write_data(buflen);
r->iov.iov_len = 0;
scsi_req_data(&r->req, buflen);
return;
case SERVICE_ACTION_IN_16:
/* Service Action In subcommands. */
if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
- DPRINTF("SAI READ CAPACITY(16)\n");
+ trace_scsi_disk_emulate_command_SAI_16();
memset(outbuf, 0, req->cmd.xfer);
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
if (!nb_sectors) {
/* Protection, exponent and lowest lba field left blank. */
break;
}
- DPRINTF("Unsupported Service Action In\n");
+ trace_scsi_disk_emulate_command_SAI_unsupported();
goto illegal_request;
case SYNCHRONIZE_CACHE:
/* The request is used as the AIO opaque value, so add a ref. */
r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
return 0;
case SEEK_10:
- DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
+ trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
if (r->req.cmd.lba > s->qdev.max_lba) {
goto illegal_lba;
}
break;
case MODE_SELECT:
- DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
+ trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
break;
case MODE_SELECT_10:
- DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
+ trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
break;
case UNMAP:
- DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
+ trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
break;
case VERIFY_10:
case VERIFY_12:
case VERIFY_16:
- DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
+ trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
if (req->cmd.buf[1] & 6) {
goto illegal_request;
}
break;
case WRITE_SAME_10:
case WRITE_SAME_16:
- DPRINTF("WRITE SAME %d (len %lu)\n",
- req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
- (unsigned long)r->req.cmd.xfer);
+ trace_scsi_disk_emulate_command_WRITE_SAME(
+ req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
break;
default:
- DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
- scsi_command_name(buf[0]));
+ trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
+ scsi_command_name(buf[0]));
scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
return 0;
}
case READ_10:
case READ_12:
case READ_16:
- DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
+ trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
/* Protection information is not supported. For SCSI versions 2 and
* older (as determined by snooping the guest's INQUIRY commands),
* there is no RD/WR/VRPROTECT, so skip this check in these versions.
scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
return 0;
}
- DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
+ trace_scsi_disk_dma_command_WRITE(
(command & 0xe) == 0xe ? "And Verify " : "",
r->req.cmd.lba, len);
/* fall through */
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
+ bool read_only;
if (!s->qdev.conf.blk) {
error_setg(errp, "drive property not set");
return;
}
+ if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
+ !s->qdev.hba_supports_iothread)
+ {
+ error_setg(errp, "HBA does not support iothreads");
+ return;
+ }
+
if (dev->type == TYPE_DISK) {
if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
return;
}
}
- if (!blkconf_apply_backend_options(&dev->conf,
- blk_is_read_only(s->qdev.conf.blk),
+
+ read_only = blk_is_read_only(s->qdev.conf.blk);
+ if (dev->type == TYPE_ROM) {
+ read_only = true;
+ }
+
+ if (!blkconf_apply_backend_options(&dev->conf, read_only,
dev->type == TYPE_DISK, errp)) {
return;
}
if (!s->vendor) {
s->vendor = g_strdup("QEMU");
}
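+ /* Without an explicit device_id, fall back to the serial number (at most
+  * 20 characters) or to the block backend name, as the old VPD code did. */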
+ if (!s->device_id) {
+ if (s->serial) {
+ s->device_id = g_strdup_printf("%.20s", s->serial);
+ } else {
+ const char *str = blk_name(s->qdev.conf.blk);
+ if (str && *str) {
+ s->device_id = g_strdup(str);
+ }
+ }
+ }
if (blk_is_sg(s->qdev.conf.blk)) {
error_setg(errp, "unwanted /dev/sg*");
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
+ AioContext *ctx = NULL;
/* can happen for devices without drive. The error message for missing
* backend will be issued in scsi_realize
*/
if (s->qdev.conf.blk) {
+ ctx = blk_get_aio_context(s->qdev.conf.blk);
+ aio_context_acquire(ctx);
blkconf_blocksizes(&s->qdev.conf);
}
s->qdev.blocksize = s->qdev.conf.logical_block_size;
s->product = g_strdup("QEMU HARDDISK");
}
scsi_realize(&s->qdev, errp);
+ if (ctx) {
+ aio_context_release(ctx);
+ }
}
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
+ AioContext *ctx;
int ret;
if (!dev->conf.blk) {
/* Anonymous BlockBackend for an empty drive. As we put it into
* dev->conf, qdev takes care of detaching on unplug. */
- dev->conf.blk = blk_new(0, BLK_PERM_ALL);
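+ /* Create the anonymous backend in the main loop's AioContext. */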
+ dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
assert(ret == 0);
}
+ ctx = blk_get_aio_context(dev->conf.blk);
+ aio_context_acquire(ctx);
s->qdev.blocksize = 2048;
s->qdev.type = TYPE_ROM;
s->features |= 1 << SCSI_DISK_F_REMOVABLE;
s->product = g_strdup("QEMU CD-ROM");
}
scsi_realize(&s->qdev, errp);
+ aio_context_release(ctx);
}
static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
[WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
};
+static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
+{
+ int i;
+ int len = scsi_cdb_length(buf);
+ char *line_buffer, *p;
+
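+ /* Each CDB byte is rendered as " 0xNN" (five characters), plus a trailing NUL. */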
+ line_buffer = g_malloc(len * 5 + 1);
+
+ for (i = 0, p = line_buffer; i < len; i++) {
+ p += sprintf(p, " 0x%02x", buf[i]);
+ }
+ trace_scsi_disk_new_request(lun, tag, line_buffer);
+
+ g_free(line_buffer);
+}
+
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
uint8_t *buf, void *hba_private)
{
}
req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
-#ifdef DEBUG_SCSI
- DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
- {
- int i;
- for (i = 1; i < scsi_cdb_length(buf); i++) {
- printf(" 0x%02x", buf[i]);
- }
- printf("\n");
+ if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
+ scsi_disk_new_request_dump(lun, tag, buf);
}
-#endif
return req;
}
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
+ AioContext *ctx;
int sg_version;
int rc;
return;
}
+ if (s->rotation_rate) {
+ error_report_once("rotation_rate is specified for scsi-block but is "
+ "not implemented. This option is deprecated and will "
+ "be removed in a future version");
+ }
+
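+ /* The ioctls and INQUIRY below go through the BlockBackend, so take its AioContext. */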
+ ctx = blk_get_aio_context(s->qdev.conf.blk);
+ aio_context_acquire(ctx);
+
/* check we are using a driver managing SG_IO (version 3 and after) */
rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
if (rc < 0) {
if (rc != -EPERM) {
error_append_hint(errp, "Is this a SCSI device?\n");
}
- return;
+ goto out;
}
if (sg_version < 30000) {
error_setg(errp, "scsi generic interface too old");
- return;
+ goto out;
}
/* get device type from INQUIRY data */
rc = get_device_type(s);
if (rc < 0) {
error_setg(errp, "INQUIRY failed");
- return;
+ goto out;
}
/* Make a guess for the block size, we'll fix it when the guest sends.
scsi_realize(&s->qdev, errp);
scsi_generic_read_device_inquiry(&s->qdev);
+
+out:
+ aio_context_release(ctx);
}
typedef struct SCSIBlockReq {
}
}
+static void scsi_block_update_sense(SCSIRequest *req)
+{
+ SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
+ SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
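+ /* sb_len_wr is the number of sense bytes the kernel actually wrote. */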
+ r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
+}
#endif
static
.abstract = true,
};
-#define DEFINE_SCSI_DISK_PROPERTIES() \
- DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
- DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
- DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
- DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
- DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
- DEFINE_PROP_STRING("product", SCSIDiskState, product)
+#define DEFINE_SCSI_DISK_PROPERTIES() \
+ DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
+ DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \
+ DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
+ DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
+ DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
+ DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
+ DEFINE_PROP_STRING("product", SCSIDiskState, product), \
+ DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
+
static Property scsi_hd_properties[] = {
DEFINE_SCSI_DISK_PROPERTIES(),
sc->parse_cdb = scsi_block_parse_cdb;
sdc->dma_readv = scsi_block_dma_readv;
sdc->dma_writev = scsi_block_dma_writev;
+ sdc->update_sense = scsi_block_update_sense;
sdc->need_fua_emulation = scsi_block_no_fua;
dc->desc = "SCSI block device passthrough";
dc->props = scsi_block_properties;