*/
static int scsi_handle_rw_error(SCSIDiskReq *r, int error)
{
- int is_read = (r->req.cmd.xfer == SCSI_XFER_FROM_DEV);
+ bool is_read = (r->req.cmd.xfer == SCSI_XFER_FROM_DEV);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- BlockdevOnError action = bdrv_get_on_error(s->qdev.conf.bs, is_read);
+ BlockErrorAction action = bdrv_get_error_action(s->qdev.conf.bs, is_read, error);
- if (action == BLOCK_ERR_IGNORE) {
- bdrv_emit_qmp_error_event(s->qdev.conf.bs, BDRV_ACTION_IGNORE, is_read);
- return 0;
- }
-
- if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
- || action == BLOCK_ERR_STOP_ANY) {
-
- bdrv_emit_qmp_error_event(s->qdev.conf.bs, BDRV_ACTION_STOP, is_read);
- vm_stop(RUN_STATE_IO_ERROR);
- bdrv_iostatus_set_err(s->qdev.conf.bs, error);
- scsi_req_retry(&r->req);
- } else {
+ if (action == BDRV_ACTION_REPORT) {
switch (error) {
case ENOMEDIUM:
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
break;
default:
scsi_check_condition(r, SENSE_CODE(IO_ERROR));
break;
}
- bdrv_emit_qmp_error_event(s->qdev.conf.bs, BDRV_ACTION_REPORT, is_read);
}
- return 1;
+ bdrv_error_action(s->qdev.conf.bs, action, is_read, error);
+ if (action == BDRV_ACTION_STOP) {
+ scsi_req_retry(&r->req);
+ }
+ return action != BDRV_ACTION_IGNORE;
}
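
For context, the int return value is consumed by the read/write completion callbacks; the pattern, as I understand it, is roughly the following (a reconstruction for illustration, not part of this hunk):

    /* Sketch of a caller such as scsi_write_complete(); names and
     * control flow are approximate. */
    if (ret < 0) {
        if (scsi_handle_rw_error(r, -ret)) {
            /* The request was either failed with a CHECK CONDITION
             * (BDRV_ACTION_REPORT) or re-queued for retry after the VM
             * stop (BDRV_ACTION_STOP); nothing more to do here. */
            return;
        }
        /* BDRV_ACTION_IGNORE: fall through and complete the request as
         * if the I/O had succeeded. */
    }
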
@@ ... @@ static void scsi_write_complete(void * opaque, int ret)
if (buflen > SCSI_MAX_INQUIRY_LEN) {
buflen = SCSI_MAX_INQUIRY_LEN;
}
- memset(outbuf, 0, buflen);
outbuf[0] = s->qdev.type & 0x1f;
outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
invalid_field:
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
- return;
}
@@ ... @@ static inline bool check_lba_range(SCSIDiskState *s,
invalid_param_len:
scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
- return;
}
@@ ... @@ static void scsi_disk_emulate_write_data(SCSIRequest *req)
break;
}
+ /*
+ * FIXME: we shouldn't return anything bigger than 4k, but the code
+ * requires the buffer to be as big as req->cmd.xfer in several
+ * places. So, do not allow CDBs with a very large ALLOCATION
+ * LENGTH. The real fix would be to modify scsi_read_data and
+ * dma_buf_read, so that they return data beyond the buflen
+ * as all zeros.
+ */
+ if (req->cmd.xfer > 65536) {
+ goto illegal_request;
+ }
+ r->buflen = MAX(4096, req->cmd.xfer);
+
if (!r->iov.iov_base) {
- /*
- * FIXME: we shouldn't return anything bigger than 4k, but the code
- * requires the buffer to be as big as req->cmd.xfer in several
- * places. So, do not allow CDBs with a very large ALLOCATION
- * LENGTH. The real fix would be to modify scsi_read_data and
- * dma_buf_read, so that they return data beyond the buflen
- * as all zeros.
- */
- if (req->cmd.xfer > 65536) {
- goto illegal_request;
- }
- r->buflen = MAX(4096, req->cmd.xfer);
r->iov.iov_base = qemu_blockalign(s->qdev.conf.bs, r->buflen);
}
buflen = req->cmd.xfer;
outbuf = r->iov.iov_base;
+ memset(outbuf, 0, r->buflen);
switch (req->cmd.buf[0]) {
case TEST_UNIT_READY:
assert(!s->tray_open && bdrv_is_inserted(s->qdev.conf.bs));
outbuf[5] = 0;
outbuf[6] = s->qdev.blocksize >> 8;
outbuf[7] = 0;
- buflen = 8;
break;
case REQUEST_SENSE:
/* Just return "NO SENSE". */
buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen,
(req->cmd.buf[1] & 1) == 0);
+ if (buflen < 0) {
+ goto illegal_request;
+ }
break;
case MECHANISM_STATUS:
buflen = scsi_emulate_mechanism_status(s, outbuf);
}
/* Protection, exponent and lowest lba field left blank. */
- buflen = req->cmd.xfer;
break;
}
DPRINTF("Unsupported Service Action In\n");
return 0;
}
assert(!r->req.aiocb);
- r->iov.iov_len = MIN(buflen, req->cmd.xfer);
+ r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
if (r->iov.iov_len == 0) {
scsi_req_complete(&r->req, GOOD);
}
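
The FIXME block above also sketches what the eventual fix should look like. A minimal illustration of that idea, assuming QEMU's MIN() macro and the usual string.h routines (the helper name is mine, purely hypothetical):

    /* Hypothetical helper for scsi_read_data()/dma_buf_read(): copy the
     * bytes that are actually valid and make everything past them read
     * as zeros, so the bounce buffer would no longer need to cover the
     * whole ALLOCATION LENGTH. */
    static void copy_and_zero_pad(uint8_t *dst, size_t alloc_len,
                                  const uint8_t *src, size_t valid_len)
    {
        size_t n = MIN(alloc_len, valid_len);

        memcpy(dst, src, n);
        memset(dst + n, 0, alloc_len - n);
    }
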
* direct-access devices.
*/
if (s->qdev.type == TYPE_DISK) {
- scsi_device_set_ua(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
}
}
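
On this last hunk: dropping the explicit scsi_device_set_ua() call only makes sense if scsi_device_report_change() latches the unit attention itself before notifying the transport. That is my reading of it; a sketch of the assumed helper (not code carried by this patch):

    /* Assumed shape of scsi_device_report_change(): record the unit
     * attention on the device, then let the bus signal the change to
     * the guest (e.g. a virtio-scsi change event). */
    void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
    {
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

        scsi_device_set_ua(dev, sense);
        if (bus->info->change) {
            bus->info->change(bus, dev, sense);
        }
    }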