SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
if (ret < 0) {
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- if (r->req.aiocb != NULL) {
- r->req.aiocb = NULL;
- bdrv_acct_done(s->qdev.conf.bs, &r->acct);
- }
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
+ bdrv_acct_done(s->qdev.conf.bs, &r->acct);
if (ret < 0) {
if (scsi_handle_rw_error(r, -ret)) {
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
int n;
- if (r->req.aiocb != NULL) {
- r->req.aiocb = NULL;
- bdrv_acct_done(s->qdev.conf.bs, &r->acct);
- }
+ assert(r->req.aiocb != NULL);
+ r->req.aiocb = NULL;
+ bdrv_acct_done(s->qdev.conf.bs, &r->acct);
if (ret < 0) {
if (scsi_handle_rw_error(r, -ret)) {
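The three completion callbacks above all converge on the same shape: instead of silently tolerating a NULL AIOCB, they now assert it is still armed, clear it, and record block accounting before looking at ret. A minimal sketch of that shape; the function name my_complete is illustrative, not part of the patch:

    static void my_complete(void *opaque, int ret)
    {
        SCSIDiskReq *r = (SCSIDiskReq *)opaque;
        SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

        /* The AIOCB must still be armed when the callback fires; a NULL
         * value would mean a double completion or a missed reset. */
        assert(r->req.aiocb != NULL);
        r->req.aiocb = NULL;
        bdrv_acct_done(s->qdev.conf.bs, &r->acct);

        if (ret < 0) {
            if (scsi_handle_rw_error(r, -ret)) {
                return;   /* request failed or was parked for retry */
            }
        }
        /* ...continue with the I/O-specific completion work... */
    }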
*/
static int scsi_handle_rw_error(SCSIDiskReq *r, int error)
{
- int is_read = (r->req.cmd.xfer == SCSI_XFER_FROM_DEV);
+ bool is_read = (r->req.cmd.xfer == SCSI_XFER_FROM_DEV);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- BlockErrorAction action = bdrv_get_on_error(s->qdev.conf.bs, is_read);
+ BlockErrorAction action = bdrv_get_error_action(s->qdev.conf.bs, is_read, error);
- if (action == BLOCK_ERR_IGNORE) {
- bdrv_emit_qmp_error_event(s->qdev.conf.bs, BDRV_ACTION_IGNORE, is_read);
- return 0;
- }
-
- if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
- || action == BLOCK_ERR_STOP_ANY) {
-
- bdrv_emit_qmp_error_event(s->qdev.conf.bs, BDRV_ACTION_STOP, is_read);
- vm_stop(RUN_STATE_IO_ERROR);
- bdrv_iostatus_set_err(s->qdev.conf.bs, error);
- scsi_req_retry(&r->req);
- } else {
+ if (action == BDRV_ACTION_REPORT) {
switch (error) {
case ENOMEDIUM:
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
scsi_check_condition(r, SENSE_CODE(IO_ERROR));
break;
}
- bdrv_emit_qmp_error_event(s->qdev.conf.bs, BDRV_ACTION_REPORT, is_read);
}
- return 1;
+ bdrv_error_action(s->qdev.conf.bs, action, is_read, error);
+ if (action == BDRV_ACTION_STOP) {
+ scsi_req_retry(&r->req);
+ }
+ return action != BDRV_ACTION_IGNORE;
}
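The rewrite moves error policy out of scsi-disk: bdrv_get_error_action() maps the drive's rerror/werror setting plus the errno to REPORT, IGNORE, or STOP, and bdrv_error_action() emits the QMP event and stops the VM when required. A simplified sketch of the policy lookup the deleted branches used to open-code, assuming the BlockdevOnError names from the same series; this is not the real block-layer implementation:

    static BlockErrorAction get_error_action_sketch(BlockdevOnError on_err,
                                                    int error)
    {
        switch (on_err) {
        case BLOCKDEV_ON_ERROR_ENOSPC:
            /* stop only when the disk ran out of space, else report */
            return (error == ENOSPC) ? BDRV_ACTION_STOP : BDRV_ACTION_REPORT;
        case BLOCKDEV_ON_ERROR_STOP:
            return BDRV_ACTION_STOP;
        case BLOCKDEV_ON_ERROR_REPORT:
            return BDRV_ACTION_REPORT;
        case BLOCKDEV_ON_ERROR_IGNORE:
            return BDRV_ACTION_IGNORE;
        default:
            abort();
        }
    }

Callers only need the boolean result: a nonzero return from scsi_handle_rw_error means the request was finished with a check condition or queued for retry, so the completion path must stop.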
static void scsi_write_complete(void * opaque, int ret)
return;
} else {
scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
- DPRINTF("Write complete tag=0x%x more=%d\n", r->req.tag, r->qiov.size);
+ DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
scsi_req_data(&r->req, r->qiov.size);
}
{
buflen = 8;
outbuf[4] = 0;
- outbuf[5] = 0x60; /* write_same 10/16 supported */
+ outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
outbuf[7] = 0;
break;
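For reference, byte 5 of the Logical Block Provisioning VPD page (B2h) carries the provisioning capability bits per SBC-3; the hunk turns on LBPU next to the two WRITE SAME bits already advertised. A hedged decode of the two constants:

    /* SBC-3 bit names for byte 5 of VPD page B2h (illustrative): */
    enum {
        LBPU    = 0x80,   /* UNMAP command supported              */
        LBPWS   = 0x40,   /* WRITE SAME(16) with the UNMAP bit    */
        LBPWS10 = 0x20,   /* WRITE SAME(10) with the UNMAP bit    */
    };
    /* 0x60 == LBPWS | LBPWS10;  0xe0 == LBPU | LBPWS | LBPWS10 */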
if (buflen > SCSI_MAX_INQUIRY_LEN) {
buflen = SCSI_MAX_INQUIRY_LEN;
}
- memset(outbuf, 0, buflen);
outbuf[0] = s->qdev.type & 0x1f;
outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
* is actually implemented, but we're good enough.
*/
outbuf[2] = 5;
- outbuf[3] = 2; /* Format 2 */
+ outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
if (buflen > 36) {
outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
bool start = req->cmd.buf[4] & 1;
bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
+ int pwrcnd = req->cmd.buf[4] & 0xf0;
+
+ if (pwrcnd) {
+ /* eject/load only happens for power condition == 0 */
+ return 0;
+ }
+
- if (s->qdev.type == TYPE_ROM && loej) {
+ if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
if (!start && !s->tray_open && s->tray_locked) {
scsi_check_condition(r,
bdrv_is_inserted(s->qdev.conf.bs)
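START STOP UNIT packs several fields into CDB byte 4, and per SBC a nonzero POWER CONDITION means the START and LOEJ bits must be ignored, which is why the handler now returns early. A self-contained decoder mirroring the new slicing; the helper name is hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    /* CDB byte 4 of START STOP UNIT: START in bit 0, LOEJ in bit 1,
     * POWER CONDITION in bits 4-7. */
    static bool start_stop_is_eject_or_load(const uint8_t *cdb)
    {
        bool loej = cdb[4] & 0x02;    /* load on start, eject on stop */
        int pwrcnd = cdb[4] & 0xf0;   /* POWER CONDITION field        */

        return pwrcnd == 0 && loej;
    }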
int buflen = r->iov.iov_len;
if (buflen) {
- DPRINTF("Read buf_len=%zd\n", buflen);
+ DPRINTF("Read buf_len=%d\n", buflen);
r->iov.iov_len = 0;
r->started = true;
scsi_req_data(&r->req, buflen);
invalid_field:
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+}
+
+static inline bool check_lba_range(SCSIDiskState *s,
+ uint64_t sector_num, uint32_t nb_sectors)
+{
+ /*
+ * The first line tests that no overflow happens when computing the last
+ * sector. The second line tests that the last accessed sector is in
+ * range.
+ *
+ * Careful, the computations should not underflow for nb_sectors == 0,
+ * and a 0-block read to the first LBA beyond the end of device is
+ * valid.
+ */
+ return (sector_num <= sector_num + nb_sectors &&
+ sector_num + nb_sectors <= s->qdev.max_lba + 1);
+}
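Both halves of the conjunction are load-bearing: the first comparison fails exactly when sector_num + nb_sectors wraps around, and the second compares against max_lba + 1 so that a zero-length transfer starting at the first LBA past the end still passes. A stand-alone harness demonstrating the edge cases, with max_lba passed as a parameter instead of read from SCSIDiskState:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-alone copy of the check above (renamed for clarity). */
    static bool lba_in_range(uint64_t max_lba,
                             uint64_t sector_num, uint32_t nb_sectors)
    {
        return (sector_num <= sector_num + nb_sectors &&
                sector_num + nb_sectors <= max_lba + 1);
    }

    int main(void)
    {
        const uint64_t max_lba = 999;   /* a 1000-sector device */

        assert(lba_in_range(max_lba, 0, 1000));       /* whole disk        */
        assert(lba_in_range(max_lba, 1000, 0));       /* 0 blocks at EOD   */
        assert(!lba_in_range(max_lba, 999, 2));       /* runs past the end */
        assert(!lba_in_range(max_lba, UINT64_MAX, 1)); /* wraps around     */
        return 0;
    }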
+
+typedef struct UnmapCBData {
+ SCSIDiskReq *r;
+ uint8_t *inbuf;
+ int count;
+} UnmapCBData;
+
+static void scsi_unmap_complete(void *opaque, int ret)
+{
+ UnmapCBData *data = opaque;
+ SCSIDiskReq *r = data->r;
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
+ uint64_t sector_num;
+ uint32_t nb_sectors;
+
+ r->req.aiocb = NULL;
+ if (ret < 0) {
+ if (scsi_handle_rw_error(r, -ret)) {
+ goto done;
+ }
+ }
+
+ if (data->count > 0 && !r->req.io_canceled) {
+ sector_num = ldq_be_p(&data->inbuf[0]);
+ nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
+ if (!check_lba_range(s, sector_num, nb_sectors)) {
+ scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
+ goto done;
+ }
+
+ r->req.aiocb = bdrv_aio_discard(s->qdev.conf.bs,
+ sector_num * (s->qdev.blocksize / 512),
+ nb_sectors * (s->qdev.blocksize / 512),
+ scsi_unmap_complete, data);
+ data->count--;
+ data->inbuf += 16;
+ return;
+ }
+
+done:
+ if (data->count == 0) {
+ scsi_req_complete(&r->req, GOOD);
+ }
+ if (!r->req.io_canceled) {
+ scsi_req_unref(&r->req);
+ }
+ g_free(data);
+}
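scsi_unmap_complete consumes the descriptor list one 16-byte entry per invocation: it issues one discard, re-arms r->req.aiocb with itself as the callback, and only completes the request once count reaches zero. The wire format it walks, shown as a C struct for clarity; the code reads the big-endian fields with ldq_be_p()/ldl_be_p() at offsets 0 and 8 rather than through a struct:

    #include <stdint.h>

    /* One UNMAP block descriptor (SBC-3), 16 bytes on the wire,
     * all fields big-endian: */
    struct unmap_block_descriptor {
        uint64_t lba;        /* bytes 0-7:  UNMAP LOGICAL BLOCK ADDRESS */
        uint32_t num_blocks; /* bytes 8-11: NUMBER OF LOGICAL BLOCKS    */
        uint32_t reserved;   /* bytes 12-15 */
    };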
+
+static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
+{
+ uint8_t *p = inbuf;
+ int len = r->req.cmd.xfer;
+ UnmapCBData *data;
+
+ if (len < 8) {
+ goto invalid_param_len;
+ }
+ if (len < lduw_be_p(&p[0]) + 2) {
+ goto invalid_param_len;
+ }
+ if (len < lduw_be_p(&p[2]) + 8) {
+ goto invalid_param_len;
+ }
+ if (lduw_be_p(&p[2]) & 15) {
+ goto invalid_param_len;
+ }
+
+ data = g_new0(UnmapCBData, 1);
+ data->r = r;
+ data->inbuf = &p[8];
+ data->count = lduw_be_p(&p[2]) >> 4;
+
+ /* The matching unref is in scsi_unmap_complete, before data is freed. */
+ scsi_req_ref(&r->req);
+ scsi_unmap_complete(data, 0);
return;
+
+invalid_param_len:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
}
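The four length checks correspond to the UNMAP parameter list layout: bytes 0-1 hold the UNMAP DATA LENGTH (total length minus 2), bytes 2-3 the BLOCK DESCRIPTOR DATA LENGTH (which must be a multiple of 16), and descriptors start at byte 8. A hypothetical helper that builds a single-descriptor list the parser accepts:

    #include <stdint.h>
    #include <string.h>

    /* Build a 24-byte UNMAP parameter list: 8-byte header plus one
     * 16-byte descriptor, all fields big-endian. */
    static void build_unmap_param_list(uint8_t *buf, uint64_t lba,
                                       uint32_t nb_blocks)
    {
        memset(buf, 0, 24);
        buf[0] = 0; buf[1] = 22;   /* UNMAP DATA LENGTH = 24 - 2       */
        buf[2] = 0; buf[3] = 16;   /* BLOCK DESCRIPTOR DATA LENGTH     */
        for (int i = 0; i < 8; i++) {      /* bytes 8-15: LBA          */
            buf[8 + i] = lba >> (56 - 8 * i);
        }
        for (int i = 0; i < 4; i++) {      /* bytes 16-19: block count */
            buf[16 + i] = nb_blocks >> (24 - 8 * i);
        }
    }

Fed through the checks above: 24 >= 8, 24 == 22 + 2, 24 == 16 + 8, and 16 & 15 == 0, so data->count comes out as 16 >> 4 == 1.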
static void scsi_disk_emulate_write_data(SCSIRequest *req)
if (r->iov.iov_len) {
int buflen = r->iov.iov_len;
- DPRINTF("Write buf_len=%zd\n", buflen);
+ DPRINTF("Write buf_len=%d\n", buflen);
r->iov.iov_len = 0;
scsi_req_data(&r->req, buflen);
return;
scsi_disk_emulate_mode_select(r, r->iov.iov_base);
break;
+ case UNMAP:
+ scsi_disk_emulate_unmap(r, r->iov.iov_base);
+ break;
+
default:
abort();
}
break;
}
+ /*
+ * FIXME: we shouldn't return anything bigger than 4k, but the code
+ * requires the buffer to be as big as req->cmd.xfer in several
+ * places. So, do not allow CDBs with a very large ALLOCATION
+ * LENGTH. The real fix would be to modify scsi_read_data and
+ * dma_buf_read, so that they return data beyond the buflen
+ * as all zeros.
+ */
+ if (req->cmd.xfer > 65536) {
+ goto illegal_request;
+ }
+ r->buflen = MAX(4096, req->cmd.xfer);
+
if (!r->iov.iov_base) {
- /*
- * FIXME: we shouldn't return anything bigger than 4k, but the code
- * requires the buffer to be as big as req->cmd.xfer in several
- * places. So, do not allow CDBs with a very large ALLOCATION
- * LENGTH. The real fix would be to modify scsi_read_data and
- * dma_buf_read, so that they return data beyond the buflen
- * as all zeros.
- */
- if (req->cmd.xfer > 65536) {
- goto illegal_request;
- }
- r->buflen = MAX(4096, req->cmd.xfer);
r->iov.iov_base = qemu_blockalign(s->qdev.conf.bs, r->buflen);
}
buflen = req->cmd.xfer;
outbuf = r->iov.iov_base;
+ memset(outbuf, 0, r->buflen);
switch (req->cmd.buf[0]) {
case TEST_UNIT_READY:
assert(!s->tray_open && bdrv_is_inserted(s->qdev.conf.bs));
outbuf[5] = 0;
outbuf[6] = s->qdev.blocksize >> 8;
outbuf[7] = 0;
- buflen = 8;
break;
case REQUEST_SENSE:
/* Just return "NO SENSE". */
buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen,
(req->cmd.buf[1] & 1) == 0);
+ if (buflen < 0) {
+ goto illegal_request;
+ }
break;
case MECHANISM_STATUS:
buflen = scsi_emulate_mechanism_status(s, outbuf);
}
/* Protection, exponent and lowest lba field left blank. */
- buflen = req->cmd.xfer;
break;
}
DPRINTF("Unsupported Service Action In\n");
case MODE_SELECT_10:
DPRINTF("Mode Select(10) (len %lu)\n", (long)r->req.cmd.xfer);
break;
+ case UNMAP:
+ DPRINTF("Unmap (len %lu)\n", (long)r->req.cmd.xfer);
+ break;
case WRITE_SAME_10:
- nb_sectors = lduw_be_p(&req->cmd.buf[7]);
- goto write_same;
case WRITE_SAME_16:
- nb_sectors = ldl_be_p(&req->cmd.buf[10]) & 0xffffffffULL;
- write_same:
- if (r->req.cmd.lba > s->qdev.max_lba) {
+ nb_sectors = scsi_data_cdb_length(r->req.cmd.buf);
+ if (bdrv_is_read_only(s->qdev.conf.bs)) {
+ scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
+ return 0;
+ }
+ if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
goto illegal_lba;
}
return 0;
}
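Both WRITE SAME variants now take their block count from the generic CDB decoder instead of the per-opcode loads that were deleted (bytes 7-8 for the 10-byte form, bytes 10-13 for the 16-byte form). A rough sketch of what such a decoder does, keyed on the opcode's group code; this is simplified and not the actual scsi_data_cdb_length() implementation:

    static uint32_t data_cdb_length_sketch(const uint8_t *buf)
    {
        switch (buf[0] >> 5) {     /* CDB group code */
        case 0:                    /* 6-byte CDB: 0 means 256 blocks */
            return buf[4] ? buf[4] : 256;
        case 1:
        case 2:                    /* 10-byte CDB */
            return lduw_be_p(&buf[7]);
        case 4:                    /* 16-byte CDB */
            return ldl_be_p(&buf[10]);
        case 5:                    /* 12-byte CDB */
            return ldl_be_p(&buf[6]);
        default:
            return 0;
        }
    }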
assert(!r->req.aiocb);
- r->iov.iov_len = MIN(buflen, req->cmd.xfer);
+ r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
if (r->iov.iov_len == 0) {
scsi_req_complete(&r->req, GOOD);
}
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
- int32_t len;
+ uint32_t len;
uint8_t command;
command = buf[0];
return 0;
}
+ len = scsi_data_cdb_length(r->req.cmd.buf);
switch (command) {
case READ_6:
case READ_10:
case READ_12:
case READ_16:
- len = r->req.cmd.xfer / s->qdev.blocksize;
- DPRINTF("Read (sector %" PRId64 ", count %d)\n", r->req.cmd.lba, len);
- if (r->req.cmd.lba > s->qdev.max_lba) {
+ DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
+ if (r->req.cmd.buf[1] & 0xe0) {
+ goto illegal_request;
+ }
+ if (!check_lba_range(s, r->req.cmd.lba, len)) {
goto illegal_lba;
}
r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
r->sector_count = len * (s->qdev.blocksize / 512);
break;
- case VERIFY_10:
- case VERIFY_12:
- case VERIFY_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_VERIFY_10:
case WRITE_VERIFY_12:
case WRITE_VERIFY_16:
- len = r->req.cmd.xfer / s->qdev.blocksize;
- DPRINTF("Write %s(sector %" PRId64 ", count %d)\n",
+ if (bdrv_is_read_only(s->qdev.conf.bs)) {
+ scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
+ return 0;
+ }
+ /* fallthrough */
+ case VERIFY_10:
+ case VERIFY_12:
+ case VERIFY_16:
+ DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
(command & 0xe) == 0xe ? "And Verify " : "",
r->req.cmd.lba, len);
- if (r->req.cmd.lba > s->qdev.max_lba) {
+ if (r->req.cmd.buf[1] & 0xe0) {
+ goto illegal_request;
+ }
+ if (!check_lba_range(s, r->req.cmd.lba, len)) {
goto illegal_lba;
}
r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
break;
default:
abort();
+ illegal_request:
+ scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+ return 0;
illegal_lba:
scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
return 0;
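Reordering the cases lets the WRITE family reject read-only backends first and then fall through into the checks shared with VERIFY. The new buf[1] & 0xe0 test rejects the RDPROTECT/WRPROTECT/VRPROTECT fields of the 10/12/16-byte CDBs, since the emulation implements no protection information; an illustrative check:

    #include <stdbool.h>
    #include <stdint.h>

    /* Top three bits of CDB byte 1 carry the protection fields in the
     * 10/12/16-byte READ/WRITE/VERIFY variants; any nonzero value
     * draws INVALID FIELD IN CDB from this emulation. */
    static bool cdb_has_protection_bits(const uint8_t *buf)
    {
        return (buf[1] & 0xe0) != 0;
    }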
blockdev_mark_auto_del(s->qdev.conf.bs);
}
+static void scsi_disk_resize_cb(void *opaque)
+{
+ SCSIDiskState *s = opaque;
+
+ /* SPC lists this sense code as available only for
+ * direct-access devices.
+ */
+ if (s->qdev.type == TYPE_DISK) {
+ scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
+ }
+}
+
static void scsi_cd_change_media_cb(void *opaque, bool load)
{
SCSIDiskState *s = opaque;
*/
s->media_changed = load;
s->tray_open = !load;
- s->qdev.unit_attention = SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM);
+ scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
s->media_event = true;
s->eject_request = false;
}
return ((SCSIDiskState *)opaque)->tray_locked;
}
-static const BlockDevOps scsi_cd_block_ops = {
+static const BlockDevOps scsi_disk_removable_block_ops = {
.change_media_cb = scsi_cd_change_media_cb,
.eject_request_cb = scsi_cd_eject_request_cb,
.is_tray_open = scsi_cd_is_tray_open,
.is_medium_locked = scsi_cd_is_medium_locked,
+
+ .resize_cb = scsi_disk_resize_cb,
+};
+
+static const BlockDevOps scsi_disk_block_ops = {
+ .resize_cb = scsi_disk_resize_cb,
};
static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
if (s->media_changed) {
s->media_changed = false;
- s->qdev.unit_attention = SENSE_CODE(MEDIUM_CHANGED);
+ scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
}
}
}
blkconf_serial(&s->qdev.conf, &s->serial);
- if (blkconf_geometry(&dev->conf, NULL, 65535, 255, 255) < 0) {
+ if (dev->type == TYPE_DISK
+ && blkconf_geometry(&dev->conf, NULL, 65535, 255, 255) < 0) {
return -1;
}
}
if (s->features & (1 << SCSI_DISK_F_REMOVABLE)) {
- bdrv_set_dev_ops(s->qdev.conf.bs, &scsi_cd_block_ops, s);
+ bdrv_set_dev_ops(s->qdev.conf.bs, &scsi_disk_removable_block_ops, s);
+ } else {
+ bdrv_set_dev_ops(s->qdev.conf.bs, &scsi_disk_block_ops, s);
}
bdrv_set_buffer_alignment(s->qdev.conf.bs, s->qdev.blocksize);
[SEEK_10] = &scsi_disk_emulate_reqops,
[MODE_SELECT] = &scsi_disk_emulate_reqops,
[MODE_SELECT_10] = &scsi_disk_emulate_reqops,
+ [UNMAP] = &scsi_disk_emulate_reqops,
[WRITE_SAME_10] = &scsi_disk_emulate_reqops,
[WRITE_SAME_16] = &scsi_disk_emulate_reqops,
const SCSIReqOps *ops;
uint8_t command;
+ command = buf[0];
+ ops = scsi_disk_reqops_dispatch[command];
+ if (!ops) {
+ ops = &scsi_disk_emulate_reqops;
+ }
+ req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
+
#ifdef DEBUG_SCSI
- DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, buf[0]);
+ DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
{
int i;
- for (i = 1; i < r->req.cmd.len; i++) {
+ for (i = 1; i < req->cmd.len; i++) {
printf(" 0x%02x", buf[i]);
}
printf("\n");
}
#endif
- command = buf[0];
- ops = scsi_disk_reqops_dispatch[command];
- if (!ops) {
- ops = &scsi_disk_emulate_reqops;
- }
- req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
return req;
}
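The allocation is hoisted above the DPRINTF so the debug code can read req->cmd.len from the freshly allocated request instead of the not-yet-existing r->req. The dispatch itself is a table indexed by opcode, with NULL falling back to the emulation reqops; a sketch of the lookup, where pick_reqops is a hypothetical name for the three lines the patch open-codes:

    static const SCSIReqOps *pick_reqops(const uint8_t *buf)
    {
        const SCSIReqOps *ops = scsi_disk_reqops_dispatch[buf[0]];

        return ops ? ops : &scsi_disk_emulate_reqops;
    }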
#ifdef __linux__
static Property scsi_block_properties[] = {
DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.bs),
+ DEFINE_PROP_INT32("bootindex", SCSIDiskState, qdev.conf.bootindex, -1),
DEFINE_PROP_END_OF_LIST(),
};