2 * SCSI Device emulation
4 * Copyright (c) 2006 CodeSourcery.
5 * Based on code by Fabrice Bellard
7 * Written by Paul Brook
9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10 * when the allocation length of the CDB is smaller
12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13 * MODE SENSE response.
15 * This code is licensed under the LGPL.
17 * Note that this file only handles the SCSI architecture model and device
18 * commands. Emulation of interface/link layer protocols is handled by
19 * the host adapter emulator.
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "hw/scsi/scsi.h"
29 #include "migration/qemu-file-types.h"
30 #include "migration/vmstate.h"
31 #include "hw/scsi/emulation.h"
32 #include "scsi/constants.h"
33 #include "sysemu/sysemu.h"
34 #include "sysemu/block-backend.h"
35 #include "sysemu/blockdev.h"
36 #include "hw/block/block.h"
37 #include "sysemu/dma.h"
38 #include "qemu/cutils.h"
45 #define SCSI_WRITE_SAME_MAX (512 * KiB)
46 #define SCSI_DMA_BUF_SIZE (128 * KiB)
47 #define SCSI_MAX_INQUIRY_LEN 256
48 #define SCSI_MAX_MODE_LEN 256
50 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
51 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
52 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */
54 #define TYPE_SCSI_DISK_BASE "scsi-disk-base"
56 #define SCSI_DISK_BASE(obj) \
57 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
58 #define SCSI_DISK_BASE_CLASS(klass) \
59 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
60 #define SCSI_DISK_BASE_GET_CLASS(obj) \
61 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)
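/*
 * Class hooks: dma_readv/dma_writev issue the actual backend I/O for
 * READ/WRITE commands, need_fua_emulation() decides whether a FUA write
 * has to be emulated with an explicit flush, and update_sense() lets a
 * subclass refresh the sense buffer before a passthrough request with
 * guest-recoverable sense is completed.
 */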
63 typedef struct SCSIDiskClass {
64 SCSIDeviceClass parent_class;
66 DMAIOFunc *dma_writev;
67 bool (*need_fua_emulation)(SCSICommand *cmd);
68 void (*update_sense)(SCSIRequest *r);
71 typedef struct SCSIDiskReq {
73 /* Both sector and sector_count are in terms of QEMU 512-byte blocks. */
75 uint32_t sector_count;
78 bool need_fua_emulation;
82 unsigned char *status;
85 #define SCSI_DISK_F_REMOVABLE 0
86 #define SCSI_DISK_F_DPOFUA 1
87 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2
89 typedef struct SCSIDiskState
97 uint64_t max_unmap_size;
108 * 0x0000 - rotation rate not reported
109 * 0x0001 - non-rotating medium (SSD)
110 * 0x0002-0x0400 - reserved
111 * 0x0401-0xfffe - rotations per minute
114 uint16_t rotation_rate;
117 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);
119 static void scsi_free_request(SCSIRequest *req)
121 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
123 qemu_vfree(r->iov.iov_base);
126 /* Helper function for command completion with sense. */
127 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
129 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
131 scsi_req_build_sense(&r->req, sense);
132 scsi_req_complete(&r->req, CHECK_CONDITION);
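/*
 * Prepare r->iov/r->qiov for the next pass of a chunked transfer: the
 * bounce buffer is allocated lazily and a single pass covers at most
 * r->buflen bytes, so large reads and writes proceed in chunks.
 */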
135 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
137 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
139 if (!r->iov.iov_base) {
141 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
143 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
144 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
147 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
149 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
151 qemu_put_be64s(f, &r->sector);
152 qemu_put_be32s(f, &r->sector_count);
153 qemu_put_be32s(f, &r->buflen);
155 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
156 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
157 } else if (!req->retry) {
158 uint32_t len = r->iov.iov_len;
159 qemu_put_be32s(f, &len);
160 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
165 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
167 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
169 qemu_get_be64s(f, &r->sector);
170 qemu_get_be32s(f, &r->sector_count);
171 qemu_get_be32s(f, &r->buflen);
173 scsi_init_iovec(r, r->buflen);
174 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
175 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
176 } else if (!r->req.retry) {
178 qemu_get_be32s(f, &len);
179 r->iov.iov_len = len;
180 assert(r->iov.iov_len <= r->buflen);
181 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
185 qemu_iovec_init_external(&r->qiov, &r->iov, 1);
188 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
190 if (r->req.io_canceled) {
191 scsi_req_cancel_complete(&r->req);
195 if (ret < 0 || (r->status && *r->status)) {
196 return scsi_handle_rw_error(r, -ret, acct_failed);
202 static void scsi_aio_complete(void *opaque, int ret)
204 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
205 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
207 assert(r->req.aiocb != NULL);
209 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
210 if (scsi_disk_req_check_error(r, ret, true)) {
214 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
215 scsi_req_complete(&r->req, GOOD);
218 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
219 scsi_req_unref(&r->req);
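/*
 * Return true if the command requests Force Unit Access semantics, i.e.
 * the data must reach the medium before completion is reported.
 */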
222 static bool scsi_is_cmd_fua(SCSICommand *cmd)
224 switch (cmd->buf[0]) {
231 return (cmd->buf[1] & 8) != 0;
236 case WRITE_VERIFY_10:
237 case WRITE_VERIFY_12:
238 case WRITE_VERIFY_16:
248 static void scsi_write_do_fua(SCSIDiskReq *r)
250 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
252 assert(r->req.aiocb == NULL);
253 assert(!r->req.io_canceled);
255 if (r->need_fua_emulation) {
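        /*
         * The data writes have already completed; emulate FUA by flushing
         * the backend before reporting success to the guest.
         */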
256 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
258 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
262 scsi_req_complete(&r->req, GOOD);
263 scsi_req_unref(&r->req);
266 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
268 assert(r->req.aiocb == NULL);
269 if (scsi_disk_req_check_error(r, ret, false)) {
273 r->sector += r->sector_count;
275 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
276 scsi_write_do_fua(r);
279 scsi_req_complete(&r->req, GOOD);
283 scsi_req_unref(&r->req);
286 static void scsi_dma_complete(void *opaque, int ret)
288 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
289 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
291 assert(r->req.aiocb != NULL);
294 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
296 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
298 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
300 scsi_dma_complete_noio(r, ret);
301 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
304 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
308 assert(r->req.aiocb == NULL);
309 if (scsi_disk_req_check_error(r, ret, false)) {
313 n = r->qiov.size / 512;
315 r->sector_count -= n;
316 scsi_req_data(&r->req, r->qiov.size);
319 scsi_req_unref(&r->req);
322 static void scsi_read_complete(void *opaque, int ret)
324 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
325 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
327 assert(r->req.aiocb != NULL);
330 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
332 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
334 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
335 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
337 scsi_read_complete_noio(r, ret);
338 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
341 /* Actually issue a read to the block device. */
342 static void scsi_do_read(SCSIDiskReq *r, int ret)
344 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
345 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
347 assert (r->req.aiocb == NULL);
348 if (scsi_disk_req_check_error(r, ret, false)) {
352 /* The request is used as the AIO opaque value, so add a ref. */
353 scsi_req_ref(&r->req);
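    /*
     * With a scatter/gather list from the HBA, DMA directly to guest memory;
     * otherwise fall back to the bounce buffer filled in SCSI_DMA_BUF_SIZE
     * chunks.
     */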
356 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
357 r->req.resid -= r->req.sg->size;
358 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
359 r->req.sg, r->sector << BDRV_SECTOR_BITS,
361 sdc->dma_readv, r, scsi_dma_complete, r,
362 DMA_DIRECTION_FROM_DEVICE);
364 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
365 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
366 r->qiov.size, BLOCK_ACCT_READ);
367 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
368 scsi_read_complete, r, r);
372 scsi_req_unref(&r->req);
375 static void scsi_do_read_cb(void *opaque, int ret)
377 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
378 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
380 assert (r->req.aiocb != NULL);
383 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
385 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
387 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
389 scsi_do_read(opaque, ret);
390 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
393 /* Read more data from scsi device into buffer. */
394 static void scsi_read_data(SCSIRequest *req)
396 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
397 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
400 trace_scsi_disk_read_data_count(r->sector_count);
401 if (r->sector_count == 0) {
402 /* This also clears the sense buffer for REQUEST SENSE. */
403 scsi_req_complete(&r->req, GOOD);
407 /* There must be no data transfer already in progress. */
408 assert(r->req.aiocb == NULL);
410 /* The request is used as the AIO opaque value, so add a ref. */
411 scsi_req_ref(&r->req);
412 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
413 trace_scsi_disk_read_data_invalid();
414 scsi_read_complete_noio(r, -EINVAL);
418 if (!blk_is_available(req->dev->conf.blk)) {
419 scsi_read_complete_noio(r, -ENOMEDIUM);
425 if (first && r->need_fua_emulation) {
426 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
428 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
435 * scsi_handle_rw_error has two return values. False means that the error
436 * must be ignored, true means that the error has been processed and the
437 * caller should not do anything else for this request. Note that
438 * scsi_handle_rw_error always manages its reference counts, independent
439 * of the return value.
441 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
443 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
444 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
445 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
446 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
449 if (action == BLOCK_ERROR_ACTION_REPORT) {
451 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
455 /* A passthrough command has run and has produced sense data; check
456 * whether the error has to be handled by the guest or should rather
459 assert(r->status && *r->status);
460 if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
461 /* These errors are handled by guest. */
462 sdc->update_sense(&r->req);
463 scsi_req_complete(&r->req, *r->status);
466 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
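    /* Map the host error code onto a SCSI sense code reported to the guest. */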
469 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
472 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
475 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
478 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
481 scsi_check_condition(r, SENSE_CODE(IO_ERROR));
486 blk_error_action(s->qdev.conf.blk, action, is_read, error);
487 if (action == BLOCK_ERROR_ACTION_IGNORE) {
488 scsi_req_complete(&r->req, 0);
492 if (action == BLOCK_ERROR_ACTION_STOP) {
493 scsi_req_retry(&r->req);
498 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
502 assert (r->req.aiocb == NULL);
503 if (scsi_disk_req_check_error(r, ret, false)) {
507 n = r->qiov.size / 512;
509 r->sector_count -= n;
510 if (r->sector_count == 0) {
511 scsi_write_do_fua(r);
514 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
515 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
516 scsi_req_data(&r->req, r->qiov.size);
520 scsi_req_unref(&r->req);
523 static void scsi_write_complete(void * opaque, int ret)
525 SCSIDiskReq *r = (SCSIDiskReq *)opaque;
526 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
528 assert (r->req.aiocb != NULL);
531 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
533 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
535 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
537 scsi_write_complete_noio(r, ret);
538 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
541 static void scsi_write_data(SCSIRequest *req)
543 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
544 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
545 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
547 /* There must be no data transfer already in progress. */
548 assert(r->req.aiocb == NULL);
550 /* The request is used as the AIO opaque value, so add a ref. */
551 scsi_req_ref(&r->req);
552 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
553 trace_scsi_disk_write_data_invalid();
554 scsi_write_complete_noio(r, -EINVAL);
558 if (!r->req.sg && !r->qiov.size) {
559 /* Called for the first time. Ask the driver to send us more data. */
561 scsi_write_complete_noio(r, 0);
564 if (!blk_is_available(req->dev->conf.blk)) {
565 scsi_write_complete_noio(r, -ENOMEDIUM);
569 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
570 r->req.cmd.buf[0] == VERIFY_16) {
572 scsi_dma_complete_noio(r, 0);
574 scsi_write_complete_noio(r, 0);
580 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
581 r->req.resid -= r->req.sg->size;
582 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
583 r->req.sg, r->sector << BDRV_SECTOR_BITS,
585 sdc->dma_writev, r, scsi_dma_complete, r,
586 DMA_DIRECTION_TO_DEVICE);
588 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
589 r->qiov.size, BLOCK_ACCT_WRITE);
590 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
591 scsi_write_complete, r, r);
595 /* Return a pointer to the data buffer. */
596 static uint8_t *scsi_get_buf(SCSIRequest *req)
598 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
600 return (uint8_t *)r->iov.iov_base;
603 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
605 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
606 uint8_t page_code = req->cmd.buf[2];
607 int start, buflen = 0;
609 outbuf[buflen++] = s->qdev.type & 0x1f;
610 outbuf[buflen++] = page_code;
611 outbuf[buflen++] = 0x00;
612 outbuf[buflen++] = 0x00;
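    /*
     * outbuf[3] is the page length; it is filled in at the end, once the
     * payload has been assembled (see the buflen - start computation below).
     */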
616 case 0x00: /* Supported page codes, mandatory */
618 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
619 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
621 outbuf[buflen++] = 0x80; /* unit serial number */
623 outbuf[buflen++] = 0x83; /* device identification */
624 if (s->qdev.type == TYPE_DISK) {
625 outbuf[buflen++] = 0xb0; /* block limits */
626 outbuf[buflen++] = 0xb1; /* block device characteristics */
627 outbuf[buflen++] = 0xb2; /* thin provisioning */
631 case 0x80: /* Device serial number, optional */
636 trace_scsi_disk_emulate_vpd_page_80_not_supported();
640 l = strlen(s->serial);
645 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
646 memcpy(outbuf + buflen, s->serial, l);
651 case 0x83: /* Device identification page, mandatory */
653 int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;
655 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);
658 outbuf[buflen++] = 0x2; /* ASCII */
659 outbuf[buflen++] = 0; /* not officially assigned */
660 outbuf[buflen++] = 0; /* reserved */
661 outbuf[buflen++] = id_len; /* length of data following */
662 memcpy(outbuf + buflen, s->device_id, id_len);
667 outbuf[buflen++] = 0x1; /* Binary */
668 outbuf[buflen++] = 0x3; /* NAA */
669 outbuf[buflen++] = 0; /* reserved */
670 outbuf[buflen++] = 8;
671 stq_be_p(&outbuf[buflen], s->qdev.wwn);
675 if (s->qdev.port_wwn) {
676 outbuf[buflen++] = 0x61; /* SAS / Binary */
677 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
678 outbuf[buflen++] = 0; /* reserved */
679 outbuf[buflen++] = 8;
680 stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
685 outbuf[buflen++] = 0x61; /* SAS / Binary */
687 /* PIV/Target port/relative target port */
688 outbuf[buflen++] = 0x94;
690 outbuf[buflen++] = 0; /* reserved */
691 outbuf[buflen++] = 4;
692 stw_be_p(&outbuf[buflen + 2], s->port_index);
697 case 0xb0: /* block limits */
699 SCSIBlockLimits bl = {};
701 if (s->qdev.type == TYPE_ROM) {
702 trace_scsi_disk_emulate_vpd_page_b0_not_supported();
707 s->qdev.conf.discard_granularity / s->qdev.blocksize;
709 s->qdev.conf.min_io_size / s->qdev.blocksize;
711 s->qdev.conf.opt_io_size / s->qdev.blocksize;
712 bl.max_unmap_sectors =
713 s->max_unmap_size / s->qdev.blocksize;
715 s->max_io_size / s->qdev.blocksize;
716 /* 255 descriptors fit in 4 KiB with an 8-byte header */
717 bl.max_unmap_descr = 255;
719 if (s->qdev.type == TYPE_DISK) {
720 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
721 int max_io_sectors_blk =
722 max_transfer_blk / s->qdev.blocksize;
725 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
727 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
730 case 0xb1: /* block device characteristics */
733 outbuf[4] = (s->rotation_rate >> 8) & 0xff;
734 outbuf[5] = s->rotation_rate & 0xff;
735 outbuf[6] = 0; /* PRODUCT TYPE */
736 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
737 outbuf[8] = 0; /* VBULS */
740 case 0xb2: /* thin provisioning */
744 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
745 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
753 assert(buflen - start <= 255);
754 outbuf[start - 1] = buflen - start;
758 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
760 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
763 if (req->cmd.buf[1] & 0x1) {
764 /* Vital product data */
765 return scsi_disk_emulate_vpd_page(req, outbuf);
768 /* Standard INQUIRY data */
769 if (req->cmd.buf[2] != 0) {
774 buflen = req->cmd.xfer;
775 if (buflen > SCSI_MAX_INQUIRY_LEN) {
776 buflen = SCSI_MAX_INQUIRY_LEN;
779 outbuf[0] = s->qdev.type & 0x1f;
780 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
782 strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
783 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
785 memset(&outbuf[32], 0, 4);
786 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
788 * We claim conformance to SPC-3, which is required for guests
789 * to ask for modern features like READ CAPACITY(16) or the
790 * block characteristics VPD page by default. Not all of SPC-3
791 * is actually implemented, but we're good enough.
793 outbuf[2] = s->qdev.default_scsi_version;
794 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
797 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
799 /* If the allocation length of the CDB is too small,
800 the additional length is not adjusted */
804 /* Sync data transfer and TCQ. */
805 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
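/*
 * The MMC emulation distinguishes CD and DVD media purely by capacity:
 * anything larger than CD_MAX_SECTORS is reported as DVD, anything up to
 * that size as CD.
 */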
809 static inline bool media_is_dvd(SCSIDiskState *s)
812 if (s->qdev.type != TYPE_ROM) {
815 if (!blk_is_available(s->qdev.conf.blk)) {
818 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
819 return nb_sectors > CD_MAX_SECTORS;
822 static inline bool media_is_cd(SCSIDiskState *s)
825 if (s->qdev.type != TYPE_ROM) {
828 if (!blk_is_available(s->qdev.conf.blk)) {
831 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
832 return nb_sectors <= CD_MAX_SECTORS;
835 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
838 uint8_t type = r->req.cmd.buf[1] & 7;
840 if (s->qdev.type != TYPE_ROM) {
844 /* Types 1/2 are only defined for Blu-Ray. */
846 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
850 memset(outbuf, 0, 34);
852 outbuf[2] = 0xe; /* last session complete, disc finalized */
853 outbuf[3] = 1; /* first track on disc */
854 outbuf[4] = 1; /* # of sessions */
855 outbuf[5] = 1; /* first track of last session */
856 outbuf[6] = 1; /* last track of last session */
857 outbuf[7] = 0x20; /* unrestricted use */
858 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
859 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
860 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
861 /* 24-31: disc bar code */
862 /* 32: disc application code */
863 /* 33: number of OPC tables */
868 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
871 static const int rds_caps_size[5] = {
878 uint8_t media = r->req.cmd.buf[1];
879 uint8_t layer = r->req.cmd.buf[6];
880 uint8_t format = r->req.cmd.buf[7];
883 if (s->qdev.type != TYPE_ROM) {
887 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
891 if (format != 0xff) {
892 if (!blk_is_available(s->qdev.conf.blk)) {
893 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
896 if (media_is_cd(s)) {
897 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
900 if (format >= ARRAY_SIZE(rds_caps_size)) {
903 size = rds_caps_size[format];
904 memset(outbuf, 0, size);
909 /* Physical format information */
914 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
916 outbuf[4] = 1; /* DVD-ROM, part version 1 */
917 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
918 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */
919 outbuf[7] = 0; /* default densities */
921 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
922 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
926 case 0x01: /* DVD copyright information, all zeros */
929 case 0x03: /* BCA information - invalid field for no BCA info */
932 case 0x04: /* DVD disc manufacturing information, all zeros */
935 case 0xff: { /* List capabilities */
938 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
939 if (!rds_caps_size[i]) {
943 outbuf[size + 1] = 0x40; /* Not writable, readable */
944 stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
954 /* Size of buffer, not including 2 byte size field */
955 stw_be_p(outbuf, size - 2);
962 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
964 uint8_t event_code, media_status;
968 media_status = MS_TRAY_OPEN;
969 } else if (blk_is_inserted(s->qdev.conf.blk)) {
970 media_status = MS_MEDIA_PRESENT;
973 /* Event notification descriptor */
974 event_code = MEC_NO_CHANGE;
975 if (media_status != MS_TRAY_OPEN) {
976 if (s->media_event) {
977 event_code = MEC_NEW_MEDIA;
978 s->media_event = false;
979 } else if (s->eject_request) {
980 event_code = MEC_EJECT_REQUESTED;
981 s->eject_request = false;
985 outbuf[0] = event_code;
986 outbuf[1] = media_status;
988 /* These fields are reserved, just clear them. */
994 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
998 uint8_t *buf = r->req.cmd.buf;
999 uint8_t notification_class_request = buf[4];
1000 if (s->qdev.type != TYPE_ROM) {
1003 if ((buf[1] & 1) == 0) {
1009 outbuf[0] = outbuf[1] = 0;
1010 outbuf[3] = 1 << GESN_MEDIA; /* supported events */
1011 if (notification_class_request & (1 << GESN_MEDIA)) {
1012 outbuf[2] = GESN_MEDIA;
1013 size += scsi_event_status_media(s, &outbuf[size]);
1017 stw_be_p(outbuf, size - 4);
1021 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1025 if (s->qdev.type != TYPE_ROM) {
1029 if (media_is_dvd(s)) {
1030 current = MMC_PROFILE_DVD_ROM;
1031 } else if (media_is_cd(s)) {
1032 current = MMC_PROFILE_CD_ROM;
1034 current = MMC_PROFILE_NONE;
1037 memset(outbuf, 0, 40);
1038 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1039 stw_be_p(&outbuf[6], current);
1040 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1041 outbuf[10] = 0x03; /* persistent, current */
1042 outbuf[11] = 8; /* two profiles */
1043 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1044 outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1045 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1046 outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1047 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1048 stw_be_p(&outbuf[20], 1);
1049 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1051 stl_be_p(&outbuf[24], 1); /* SCSI */
1052 outbuf[28] = 1; /* DBE = 1, mandatory */
1053 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1054 stw_be_p(&outbuf[32], 3);
1055 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1057 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1058 /* TODO: Random readable, CD read, DVD read, drive serial number,
1063 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1065 if (s->qdev.type != TYPE_ROM) {
1068 memset(outbuf, 0, 8);
1069 outbuf[5] = 1; /* CD-ROM */
1073 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1076 static const int mode_sense_valid[0x3f] = {
1077 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
1078 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1079 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1080 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1081 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
1082 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
1085 uint8_t *p = *p_outbuf + 2;
1088 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1093 * If Changeable Values are requested, a mask denoting those mode parameters
1094 * that are changeable shall be returned. As we currently don't support
1095 * parameter changes via MODE_SELECT, all bits are returned set to zero.
1096 * The buffer was already memset to zero by the caller of this function.
1098 * The offsets here are off by two compared to the descriptions in the
1099 * SCSI specs, because those include a 2-byte header. This is unfortunate,
1100 * but it is done so that offsets are consistent within our implementation
1101 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
1102 * 2-byte and 4-byte headers.
1105 case MODE_PAGE_HD_GEOMETRY:
1107 if (page_control == 1) { /* Changeable Values */
1110 /* if a geometry hint is available, use it */
1111 p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1112 p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1113 p[2] = s->qdev.conf.cyls & 0xff;
1114 p[3] = s->qdev.conf.heads & 0xff;
1115 /* Write precomp start cylinder, disabled */
1116 p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1117 p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1118 p[6] = s->qdev.conf.cyls & 0xff;
1119 /* Reduced current start cylinder, disabled */
1120 p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1121 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1122 p[9] = s->qdev.conf.cyls & 0xff;
1123 /* Device step rate [ns], 200ns */
1126 /* Landing zone cylinder */
1130 /* Medium rotation rate [rpm], 5400 rpm */
1131 p[18] = (5400 >> 8) & 0xff;
1132 p[19] = 5400 & 0xff;
1135 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1137 if (page_control == 1) { /* Changeable Values */
1140 /* Transfer rate [kbit/s], 5Mbit/s */
1143 /* if a geometry hint is available, use it */
1144 p[2] = s->qdev.conf.heads & 0xff;
1145 p[3] = s->qdev.conf.secs & 0xff;
1146 p[4] = s->qdev.blocksize >> 8;
1147 p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1148 p[7] = s->qdev.conf.cyls & 0xff;
1149 /* Write precomp start cylinder, disabled */
1150 p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1151 p[9] = s->qdev.conf.cyls & 0xff;
1152 /* Reduced current start cylinder, disabled */
1153 p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1154 p[11] = s->qdev.conf.cyls & 0xff;
1155 /* Device step rate [100us], 100us */
1158 /* Device step pulse width [us], 1us */
1160 /* Device head settle delay [100us], 100us */
1163 /* Motor on delay [0.1s], 0.1s */
1165 /* Motor off delay [0.1s], 0.1s */
1167 /* Medium rotation rate [rpm], 5400 rpm */
1168 p[26] = (5400 >> 8) & 0xff;
1169 p[27] = 5400 & 0xff;
1172 case MODE_PAGE_CACHING:
1174 if (page_control == 1 || /* Changeable Values */
1175 blk_enable_write_cache(s->qdev.conf.blk)) {
1180 case MODE_PAGE_R_W_ERROR:
1182 if (page_control == 1) { /* Changeable Values */
1185 p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1186 if (s->qdev.type == TYPE_ROM) {
1187 p[1] = 0x20; /* Read Retry Count */
1191 case MODE_PAGE_AUDIO_CTL:
1195 case MODE_PAGE_CAPABILITIES:
1197 if (page_control == 1) { /* Changeable Values */
1201 p[0] = 0x3b; /* CD-R & CD-RW read */
1202 p[1] = 0; /* Writing not supported */
1203 p[2] = 0x7f; /* Audio, composite, digital out,
1204 mode 2 form 1&2, multi session */
1205 p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1206 RW corrected, C2 errors, ISRC,
1208 p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1209 /* Locking supported, jumper present, eject, tray */
1210 p[5] = 0; /* no volume & mute control, no
1212 p[6] = (50 * 176) >> 8; /* 50x read speed */
1213 p[7] = (50 * 176) & 0xff;
1214 p[8] = 2 >> 8; /* Two volume levels */
1216 p[10] = 2048 >> 8; /* 2M buffer */
1217 p[11] = 2048 & 0xff;
1218 p[12] = (16 * 176) >> 8; /* 16x read speed current */
1219 p[13] = (16 * 176) & 0xff;
1220 p[16] = (16 * 176) >> 8; /* 16x write speed */
1221 p[17] = (16 * 176) & 0xff;
1222 p[18] = (16 * 176) >> 8; /* 16x write speed current */
1223 p[19] = (16 * 176) & 0xff;
1230 assert(length < 256);
1231 (*p_outbuf)[0] = page;
1232 (*p_outbuf)[1] = length;
1233 *p_outbuf += length + 2;
1237 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1239 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1240 uint64_t nb_sectors;
1242 int page, buflen, ret, page_control;
1244 uint8_t dev_specific_param;
1246 dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1247 page = r->req.cmd.buf[2] & 0x3f;
1248 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1250 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
1251 10, page, r->req.cmd.xfer, page_control);
1252 memset(outbuf, 0, r->req.cmd.xfer);
1255 if (s->qdev.type == TYPE_DISK) {
1256 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1257 if (blk_is_read_only(s->qdev.conf.blk)) {
1258 dev_specific_param |= 0x80; /* Readonly. */
1261 /* MMC prescribes that CD/DVD drives have no block descriptors,
1262 * and defines no device-specific parameter. */
1263 dev_specific_param = 0x00;
1267 if (r->req.cmd.buf[0] == MODE_SENSE) {
1268 p[1] = 0; /* Default media type. */
1269 p[2] = dev_specific_param;
1270 p[3] = 0; /* Block descriptor length. */
1272 } else { /* MODE_SENSE_10 */
1273 p[2] = 0; /* Default media type. */
1274 p[3] = dev_specific_param;
1275 p[6] = p[7] = 0; /* Block descriptor length. */
1279 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
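    /*
     * Unless the guest set the DBD (disable block descriptors) bit, append
     * a single short-form block descriptor describing the medium.
     */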
1280 if (!dbd && nb_sectors) {
1281 if (r->req.cmd.buf[0] == MODE_SENSE) {
1282 outbuf[3] = 8; /* Block descriptor length */
1283 } else { /* MODE_SENSE_10 */
1284 outbuf[7] = 8; /* Block descriptor length */
1286 nb_sectors /= (s->qdev.blocksize / 512);
1287 if (nb_sectors > 0xffffff) {
1290 p[0] = 0; /* media density code */
1291 p[1] = (nb_sectors >> 16) & 0xff;
1292 p[2] = (nb_sectors >> 8) & 0xff;
1293 p[3] = nb_sectors & 0xff;
1294 p[4] = 0; /* reserved */
1295 p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1296 p[6] = s->qdev.blocksize >> 8;
1301 if (page_control == 3) {
1303 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1308 for (page = 0; page <= 0x3e; page++) {
1309 mode_sense_page(s, page, &p, page_control);
1312 ret = mode_sense_page(s, page, &p, page_control);
1318 buflen = p - outbuf;
1320 * The mode data length field specifies the length in bytes of the
1321 * following data that is available to be transferred. The mode data
1322 * length does not include itself.
1324 if (r->req.cmd.buf[0] == MODE_SENSE) {
1325 outbuf[0] = buflen - 1;
1326 } else { /* MODE_SENSE_10 */
1327 outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1328 outbuf[1] = (buflen - 2) & 0xff;
1333 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1335 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1336 int start_track, format, msf, toclen;
1337 uint64_t nb_sectors;
1339 msf = req->cmd.buf[1] & 2;
1340 format = req->cmd.buf[2] & 0xf;
1341 start_track = req->cmd.buf[6];
1342 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1343 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1344 nb_sectors /= s->qdev.blocksize / 512;
1347 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1350 /* multi session : only a single session defined */
1352 memset(outbuf, 0, 12);
1358 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1366 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1368 SCSIRequest *req = &r->req;
1369 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1370 bool start = req->cmd.buf[4] & 1;
1371 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1372 int pwrcnd = req->cmd.buf[4] & 0xf0;
1375 /* eject/load only happens for power condition == 0 */
1379 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1380 if (!start && !s->tray_open && s->tray_locked) {
1381 scsi_check_condition(r,
1382 blk_is_inserted(s->qdev.conf.blk)
1383 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1384 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1388 if (s->tray_open != !start) {
1389 blk_eject(s->qdev.conf.blk, !start);
1390 s->tray_open = !start;
1396 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1398 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1399 int buflen = r->iov.iov_len;
1402 trace_scsi_disk_emulate_read_data(buflen);
1405 scsi_req_data(&r->req, buflen);
1409 /* This also clears the sense buffer for REQUEST SENSE. */
1410 scsi_req_complete(&r->req, GOOD);
1413 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1414 uint8_t *inbuf, int inlen)
1416 uint8_t mode_current[SCSI_MAX_MODE_LEN];
1417 uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1419 int len, expected_len, changeable_len, i;
1421 /* The input buffer does not include the page header, so it is
1424 expected_len = inlen + 2;
1425 if (expected_len > SCSI_MAX_MODE_LEN) {
1430 memset(mode_current, 0, inlen + 2);
1431 len = mode_sense_page(s, page, &p, 0);
1432 if (len < 0 || len != expected_len) {
1436 p = mode_changeable;
1437 memset(mode_changeable, 0, inlen + 2);
1438 changeable_len = mode_sense_page(s, page, &p, 1);
1439 assert(changeable_len == len);
1441 /* Check that unchangeable bits are the same as what MODE SENSE
1444 for (i = 2; i < len; i++) {
1445 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1452 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1455 case MODE_PAGE_CACHING:
1456 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1464 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1466 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1469 int page, subpage, page_len;
1471 /* Parse both possible formats for the mode page headers. */
1475 goto invalid_param_len;
1478 page_len = lduw_be_p(&p[2]);
1483 goto invalid_param_len;
1494 if (page_len > len) {
1495 goto invalid_param_len;
1499 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1503 scsi_disk_apply_mode_select(s, page, p);
1512 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1516 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1520 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1522 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1524 int cmd = r->req.cmd.buf[0];
1525 int len = r->req.cmd.xfer;
1526 int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1530 /* We only support PF=1, SP=0. */
1531 if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1535 if (len < hdr_len) {
1536 goto invalid_param_len;
1539 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1543 goto invalid_param_len;
1545 if (bd_len != 0 && bd_len != 8) {
1552 /* Ensure no change is made if there is an error! */
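    /* The first pass only validates the pages, the second pass applies them. */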
1553 for (pass = 0; pass < 2; pass++) {
1554 if (mode_select_pages(r, p, len, pass == 1) < 0) {
1559 if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1560 /* The request is used as the AIO opaque value, so add a ref. */
1561 scsi_req_ref(&r->req);
1562 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1564 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1568 scsi_req_complete(&r->req, GOOD);
1572 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1576 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1580 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1583 static inline bool check_lba_range(SCSIDiskState *s,
1584 uint64_t sector_num, uint32_t nb_sectors)
1587 * The first line tests that no overflow happens when computing the last
1588 * sector. The second line tests that the last accessed sector is in
1591 * Careful, the computations should not underflow for nb_sectors == 0,
1592 * and a 0-block read to the first LBA beyond the end of device is
1595 return (sector_num <= sector_num + nb_sectors &&
1596 sector_num + nb_sectors <= s->qdev.max_lba + 1);
1599 typedef struct UnmapCBData {
1605 static void scsi_unmap_complete(void *opaque, int ret);
1607 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1609 SCSIDiskReq *r = data->r;
1610 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1611 uint64_t sector_num;
1612 uint32_t nb_sectors;
1614 assert(r->req.aiocb == NULL);
1615 if (scsi_disk_req_check_error(r, ret, false)) {
1619 if (data->count > 0) {
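        /*
         * Process one block descriptor per call; scsi_unmap_complete
         * re-enters this function until all descriptors are consumed.
         */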
1620 sector_num = ldq_be_p(&data->inbuf[0]);
1621 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1622 if (!check_lba_range(s, sector_num, nb_sectors)) {
1623 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1627 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1628 sector_num * s->qdev.blocksize,
1629 nb_sectors * s->qdev.blocksize,
1630 scsi_unmap_complete, data);
1636 scsi_req_complete(&r->req, GOOD);
1639 scsi_req_unref(&r->req);
1643 static void scsi_unmap_complete(void *opaque, int ret)
1645 UnmapCBData *data = opaque;
1646 SCSIDiskReq *r = data->r;
1647 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1649 assert(r->req.aiocb != NULL);
1650 r->req.aiocb = NULL;
1652 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1653 scsi_unmap_complete_noio(data, ret);
1654 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1657 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1659 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1661 int len = r->req.cmd.xfer;
1664 /* Reject ANCHOR=1. */
1665 if (r->req.cmd.buf[1] & 0x1) {
1670 goto invalid_param_len;
1672 if (len < lduw_be_p(&p[0]) + 2) {
1673 goto invalid_param_len;
1675 if (len < lduw_be_p(&p[2]) + 8) {
1676 goto invalid_param_len;
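    /*
     * p[0..1] holds the UNMAP data length and p[2..3] the block descriptor
     * data length; each descriptor is 16 bytes, so the latter must be a
     * multiple of 16.
     */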
1678 if (lduw_be_p(&p[2]) & 15) {
1679 goto invalid_param_len;
1682 if (blk_is_read_only(s->qdev.conf.blk)) {
1683 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1687 data = g_new0(UnmapCBData, 1);
1689 data->inbuf = &p[8];
1690 data->count = lduw_be_p(&p[2]) >> 4;
1692 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1693 scsi_req_ref(&r->req);
1694 scsi_unmap_complete_noio(data, 0);
1698 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1702 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1705 typedef struct WriteSameCBData {
1713 static void scsi_write_same_complete(void *opaque, int ret)
1715 WriteSameCBData *data = opaque;
1716 SCSIDiskReq *r = data->r;
1717 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1719 assert(r->req.aiocb != NULL);
1720 r->req.aiocb = NULL;
1721 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1722 if (scsi_disk_req_check_error(r, ret, true)) {
1726 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
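    /*
     * Advance past the chunk that just completed; the pattern buffer is at
     * most SCSI_WRITE_SAME_MAX bytes, so large requests are written in
     * pieces, re-entering here after each chunk.
     */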
1728 data->nb_sectors -= data->iov.iov_len / 512;
1729 data->sector += data->iov.iov_len / 512;
1730 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
1731 if (data->iov.iov_len) {
1732 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1733 data->iov.iov_len, BLOCK_ACCT_WRITE);
1734 /* Reinitialize qiov to handle unaligned WRITE SAME requests
1735 * where the final qiov may need a smaller size */
1736 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1737 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1738 data->sector << BDRV_SECTOR_BITS,
1740 scsi_write_same_complete, data);
1741 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1745 scsi_req_complete(&r->req, GOOD);
1748 scsi_req_unref(&r->req);
1749 qemu_vfree(data->iov.iov_base);
1751 aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1754 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1756 SCSIRequest *req = &r->req;
1757 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1758 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1759 WriteSameCBData *data;
1763 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1764 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1765 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1769 if (blk_is_read_only(s->qdev.conf.blk)) {
1770 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1773 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1774 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
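    /*
     * An all-zero pattern can be serviced with an efficient write-zeroes
     * operation (optionally allowing the backend to unmap the blocks);
     * otherwise fall through to the chunked write loop below.
     */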
1778 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
1779 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1781 /* The request is used as the AIO opaque value, so add a ref. */
1782 scsi_req_ref(&r->req);
1783 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1784 nb_sectors * s->qdev.blocksize,
1786 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1787 r->req.cmd.lba * s->qdev.blocksize,
1788 nb_sectors * s->qdev.blocksize,
1789 flags, scsi_aio_complete, r);
1793 data = g_new0(WriteSameCBData, 1);
1795 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
1796 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
1797 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
1798 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1800 qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1802 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
1803 memcpy(&buf[i], inbuf, s->qdev.blocksize);
1806 scsi_req_ref(&r->req);
1807 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1808 data->iov.iov_len, BLOCK_ACCT_WRITE);
1809 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1810 data->sector << BDRV_SECTOR_BITS,
1812 scsi_write_same_complete, data);
1815 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1817 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1819 if (r->iov.iov_len) {
1820 int buflen = r->iov.iov_len;
1821 trace_scsi_disk_emulate_write_data(buflen);
1823 scsi_req_data(&r->req, buflen);
1827 switch (req->cmd.buf[0]) {
1829 case MODE_SELECT_10:
1830 /* This also clears the sense buffer for REQUEST SENSE. */
1831 scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1835 scsi_disk_emulate_unmap(r, r->iov.iov_base);
1841 if (r->req.status == -1) {
1842 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1848 scsi_disk_emulate_write_same(r, r->iov.iov_base);
1856 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1858 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1859 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1860 uint64_t nb_sectors;
1864 switch (req->cmd.buf[0]) {
1873 case ALLOW_MEDIUM_REMOVAL:
1874 case GET_CONFIGURATION:
1875 case GET_EVENT_STATUS_NOTIFICATION:
1876 case MECHANISM_STATUS:
1881 if (!blk_is_available(s->qdev.conf.blk)) {
1882 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1889 * FIXME: we shouldn't return anything bigger than 4k, but the code
1890 * requires the buffer to be as big as req->cmd.xfer in several
1891 * places. So, do not allow CDBs with a very large ALLOCATION
1892 * LENGTH. The real fix would be to modify scsi_read_data and
1893 * dma_buf_read, so that they return data beyond the buflen
1896 if (req->cmd.xfer > 65536) {
1897 goto illegal_request;
1899 r->buflen = MAX(4096, req->cmd.xfer);
1901 if (!r->iov.iov_base) {
1902 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
1905 buflen = req->cmd.xfer;
1906 outbuf = r->iov.iov_base;
1907 memset(outbuf, 0, r->buflen);
1908 switch (req->cmd.buf[0]) {
1909 case TEST_UNIT_READY:
1910 assert(blk_is_available(s->qdev.conf.blk));
1913 buflen = scsi_disk_emulate_inquiry(req, outbuf);
1915 goto illegal_request;
1920 buflen = scsi_disk_emulate_mode_sense(r, outbuf);
1922 goto illegal_request;
1926 buflen = scsi_disk_emulate_read_toc(req, outbuf);
1928 goto illegal_request;
1932 if (req->cmd.buf[1] & 1) {
1933 goto illegal_request;
1937 if (req->cmd.buf[1] & 3) {
1938 goto illegal_request;
1942 if (req->cmd.buf[1] & 1) {
1943 goto illegal_request;
1947 if (req->cmd.buf[1] & 3) {
1948 goto illegal_request;
1952 if (scsi_disk_emulate_start_stop(r) < 0) {
1956 case ALLOW_MEDIUM_REMOVAL:
1957 s->tray_locked = req->cmd.buf[4] & 1;
1958 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
1960 case READ_CAPACITY_10:
1961 /* The normal LEN field for this command is zero. */
1962 memset(outbuf, 0, 8);
1963 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1965 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
1968 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
1969 goto illegal_request;
1971 nb_sectors /= s->qdev.blocksize / 512;
1972 /* Returned value is the address of the last sector. */
1974 /* Remember the new size for read/write sanity checking. */
1975 s->qdev.max_lba = nb_sectors;
1976 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
1977 if (nb_sectors > UINT32_MAX) {
1978 nb_sectors = UINT32_MAX;
1980 outbuf[0] = (nb_sectors >> 24) & 0xff;
1981 outbuf[1] = (nb_sectors >> 16) & 0xff;
1982 outbuf[2] = (nb_sectors >> 8) & 0xff;
1983 outbuf[3] = nb_sectors & 0xff;
1986 outbuf[6] = s->qdev.blocksize >> 8;
1990 /* Just return "NO SENSE". */
1991 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
1992 (req->cmd.buf[1] & 1) == 0);
1994 goto illegal_request;
1997 case MECHANISM_STATUS:
1998 buflen = scsi_emulate_mechanism_status(s, outbuf);
2000 goto illegal_request;
2003 case GET_CONFIGURATION:
2004 buflen = scsi_get_configuration(s, outbuf);
2006 goto illegal_request;
2009 case GET_EVENT_STATUS_NOTIFICATION:
2010 buflen = scsi_get_event_status_notification(s, r, outbuf);
2012 goto illegal_request;
2015 case READ_DISC_INFORMATION:
2016 buflen = scsi_read_disc_information(s, r, outbuf);
2018 goto illegal_request;
2021 case READ_DVD_STRUCTURE:
2022 buflen = scsi_read_dvd_structure(s, r, outbuf);
2024 goto illegal_request;
2027 case SERVICE_ACTION_IN_16:
2028 /* Service Action In subcommands. */
2029 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2030 trace_scsi_disk_emulate_command_SAI_16();
2031 memset(outbuf, 0, req->cmd.xfer);
2032 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2034 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2037 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2038 goto illegal_request;
2040 nb_sectors /= s->qdev.blocksize / 512;
2041 /* Returned value is the address of the last sector. */
2043 /* Remember the new size for read/write sanity checking. */
2044 s->qdev.max_lba = nb_sectors;
2045 outbuf[0] = (nb_sectors >> 56) & 0xff;
2046 outbuf[1] = (nb_sectors >> 48) & 0xff;
2047 outbuf[2] = (nb_sectors >> 40) & 0xff;
2048 outbuf[3] = (nb_sectors >> 32) & 0xff;
2049 outbuf[4] = (nb_sectors >> 24) & 0xff;
2050 outbuf[5] = (nb_sectors >> 16) & 0xff;
2051 outbuf[6] = (nb_sectors >> 8) & 0xff;
2052 outbuf[7] = nb_sectors & 0xff;
2055 outbuf[10] = s->qdev.blocksize >> 8;
2058 outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2060 /* set TPE bit if the format supports discard */
2061 if (s->qdev.conf.discard_granularity) {
2065 /* Protection, exponent and lowest lba field left blank. */
2068 trace_scsi_disk_emulate_command_SAI_unsupported();
2069 goto illegal_request;
2070 case SYNCHRONIZE_CACHE:
2071 /* The request is used as the AIO opaque value, so add a ref. */
2072 scsi_req_ref(&r->req);
2073 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2075 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2078 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2079 if (r->req.cmd.lba > s->qdev.max_lba) {
2084 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2086 case MODE_SELECT_10:
2087 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2090 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2095 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
2096 if (req->cmd.buf[1] & 6) {
2097 goto illegal_request;
2102 trace_scsi_disk_emulate_command_WRITE_SAME(
2103 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2106 trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2107 scsi_command_name(buf[0]));
2108 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2111 assert(!r->req.aiocb);
2112 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2113 if (r->iov.iov_len == 0) {
2114 scsi_req_complete(&r->req, GOOD);
2116 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2117 assert(r->iov.iov_len == req->cmd.xfer);
2118 return -r->iov.iov_len;
2120 return r->iov.iov_len;
2124 if (r->req.status == -1) {
2125 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2130 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2134 /* Execute a SCSI command. Returns the length of the data expected by the
2135 command. This will be positive for data transfers from the device
2136 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2137 and zero if the command does not transfer any data. */
2139 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2141 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2142 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2143 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2149 if (!blk_is_available(s->qdev.conf.blk)) {
2150 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2154 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2160 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
2161 /* Protection information is not supported. For SCSI versions 2 and
2162 * older (as determined by snooping the guest's INQUIRY commands),
2163 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2165 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2166 goto illegal_request;
2168 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2171 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2172 r->sector_count = len * (s->qdev.blocksize / 512);
2178 case WRITE_VERIFY_10:
2179 case WRITE_VERIFY_12:
2180 case WRITE_VERIFY_16:
2181 if (blk_is_read_only(s->qdev.conf.blk)) {
2182 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2185 trace_scsi_disk_dma_command_WRITE(
2186 (command & 0xe) == 0xe ? "And Verify " : "",
2187 r->req.cmd.lba, len);
2192 /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2193 * As far as DMA is concerned, we can treat it the same as a write;
2194 * scsi_block_do_sgio will send VERIFY commands.
2196 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2197 goto illegal_request;
2199 if (!check_lba_range(s, r->req.cmd.lba, len)) {
2202 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
2203 r->sector_count = len * (s->qdev.blocksize / 512);
2208 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2211 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2214 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2215 if (r->sector_count == 0) {
2216 scsi_req_complete(&r->req, GOOD);
2218 assert(r->iov.iov_len == 0);
2219 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2220 return -r->sector_count * 512;
2222 return r->sector_count * 512;
2226 static void scsi_disk_reset(DeviceState *dev)
2228 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2229 uint64_t nb_sectors;
2231 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2233 blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2234 nb_sectors /= s->qdev.blocksize / 512;
2238 s->qdev.max_lba = nb_sectors;
2239 /* reset tray statuses */
2243 s->qdev.scsi_version = s->qdev.default_scsi_version;
2246 static void scsi_disk_resize_cb(void *opaque)
2248 SCSIDiskState *s = opaque;
2250 /* SPC lists this sense code as available only for
2251 * direct-access devices.
2253 if (s->qdev.type == TYPE_DISK) {
2254 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2258 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2260 SCSIDiskState *s = opaque;
2263 * When a CD gets changed, we have to report an ejected state and
2264 * then a loaded state to guests so that they detect tray
2265 * open/close and media change events. Guests that do not use
2266 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2267 * states rely on this behavior.
2269 * media_changed governs the state machine used for unit attention
2270 * report. media_event is used by GET EVENT STATUS NOTIFICATION.
2272 s->media_changed = load;
2273 s->tray_open = !load;
2274 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2275 s->media_event = true;
2276 s->eject_request = false;
2279 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2281 SCSIDiskState *s = opaque;
2283 s->eject_request = true;
2285 s->tray_locked = false;
2289 static bool scsi_cd_is_tray_open(void *opaque)
2291 return ((SCSIDiskState *)opaque)->tray_open;
2294 static bool scsi_cd_is_medium_locked(void *opaque)
2296 return ((SCSIDiskState *)opaque)->tray_locked;
2299 static const BlockDevOps scsi_disk_removable_block_ops = {
2300 .change_media_cb = scsi_cd_change_media_cb,
2301 .eject_request_cb = scsi_cd_eject_request_cb,
2302 .is_tray_open = scsi_cd_is_tray_open,
2303 .is_medium_locked = scsi_cd_is_medium_locked,
2305 .resize_cb = scsi_disk_resize_cb,
2308 static const BlockDevOps scsi_disk_block_ops = {
2309 .resize_cb = scsi_disk_resize_cb,
2312 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2314 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2315 if (s->media_changed) {
2316 s->media_changed = false;
2317 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2321 static void scsi_realize(SCSIDevice *dev, Error **errp)
2323 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2326 if (!s->qdev.conf.blk) {
2327 error_setg(errp, "drive property not set");
2331 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2332 !blk_is_inserted(s->qdev.conf.blk)) {
2333 error_setg(errp, "Device needs media, but drive is empty");
2337 blkconf_blocksizes(&s->qdev.conf);
2339 if (s->qdev.conf.logical_block_size >
2340 s->qdev.conf.physical_block_size) {
2342 "logical_block_size > physical_block_size not supported");
2346 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
2347 !s->qdev.hba_supports_iothread)
2349 error_setg(errp, "HBA does not support iothreads");
2353 if (dev->type == TYPE_DISK) {
2354 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
2359 read_only = blk_is_read_only(s->qdev.conf.blk);
2360 if (dev->type == TYPE_ROM) {
2364 if (!blkconf_apply_backend_options(&dev->conf, read_only,
2365 dev->type == TYPE_DISK, errp)) {
2369 if (s->qdev.conf.discard_granularity == -1) {
2370 s->qdev.conf.discard_granularity =
2371 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2375 s->version = g_strdup(qemu_hw_version());
2378 s->vendor = g_strdup("QEMU");
2380 if (!s->device_id) {
2382 s->device_id = g_strdup_printf("%.20s", s->serial);
2384 const char *str = blk_name(s->qdev.conf.blk);
2386 s->device_id = g_strdup(str);
2391 if (blk_is_sg(s->qdev.conf.blk)) {
2392 error_setg(errp, "unwanted /dev/sg*");
2396 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2397 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2398 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2400 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2402 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);
2404 blk_iostatus_enable(s->qdev.conf.blk);
2407 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2409 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2410 AioContext *ctx = NULL;
2411 /* This can happen for devices without a drive. The error message for the
2412 * missing backend will be issued in scsi_realize
2414 if (s->qdev.conf.blk) {
2415 ctx = blk_get_aio_context(s->qdev.conf.blk);
2416 aio_context_acquire(ctx);
2417 blkconf_blocksizes(&s->qdev.conf);
2419 s->qdev.blocksize = s->qdev.conf.logical_block_size;
2420 s->qdev.type = TYPE_DISK;
2422 s->product = g_strdup("QEMU HARDDISK");
2424 scsi_realize(&s->qdev, errp);
2426 aio_context_release(ctx);
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    ctx = blk_get_aio_context(dev->conf.blk);
    aio_context_acquire(ctx);
    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
    aio_context_release(ctx);
}

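/*
 * Note: with no drive configured the CD-ROM starts out empty; a medium can be
 * attached later from the monitor (for example with the QMP
 * blockdev-change-medium command), which triggers the change_media_cb hooked
 * up in scsi_disk_removable_block_ops above.
 */
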
static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (!dev->conf.blk) {
        scsi_realize(dev, &local_err);
        assert(local_err);
        error_propagate(errp, local_err);
        return;
    }
    dinfo = blk_legacy_dinfo(dev->conf.blk);
    if (dinfo && dinfo->media_cd) {
        scsi_cd_realize(dev, errp);
    } else {
        scsi_hd_realize(dev, errp);
    }
}

static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};

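/*
 * Opcodes without an entry in this table fall back to scsi_disk_emulate_reqops
 * in scsi_new_request() below; commands the emulation does not implement then
 * typically complete with an ILLEGAL REQUEST / INVALID OPCODE check condition
 * rather than being dropped silently.
 */
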
static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
{
    int i;
    int len = scsi_cdb_length(buf);
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", buf[i]);
    }
    trace_scsi_disk_new_request(lun, tag, line_buffer);

    g_free(line_buffer);
}

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command = buf[0];

    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
    if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
        scsi_disk_new_request_dump(lun, tag, buf);
    }
    return req;
}

static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);
    ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

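/*
 * In the standard INQUIRY response parsed above, byte 0 carries the peripheral
 * device type and bit 7 of byte 1 is the RMB (removable medium) flag, which is
 * why those two bytes are enough to classify the host device here.
 */
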
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);

    /* Check that we are using a driver that supports SG_IO (version 3 and
     * later).
     */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        goto out;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        goto out;
    }

    /* Get the device type from the INQUIRY data. */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        goto out;
    }

    /* Make a guess for the block size; we fix it up when the guest sends
     * READ CAPACITY. If it doesn't, it would likely assume these sizes
     * anyway. (TODO: check in /sys.)
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Make the scsi-block device not removable via the HMP and QMP eject
     * commands.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);

out:
    aio_context_release(ctx);
}

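/*
 * Rough summary (not exhaustive): scsi-block only works when the backing node
 * is a whole host SCSI device that answers SG_IO (e.g. /dev/sdX or /dev/srX),
 * which is exactly what the SG_GET_VERSION_NUM probe above verifies.
 */
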
typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
} SCSIBlockReq;

static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    int64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet. It can only happen if the guest does
     * reads and writes that are not aligned to the logical sector size
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector. */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments. Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c. */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = UINT_MAX;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;

    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
    assert(aiocb != NULL);
    return aiocb;
}

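/*
 * Worked example of the CDB rewrite above (illustrative numbers): a guest
 * READ(10) for 1024 blocks starting at LBA 0x10000 may be split by the DMA
 * helpers into two segments; each segment results in its own SG_IO call whose
 * CDB carries the segment's own LBA (0x10000, then 0x10200) and length (512
 * blocks each), while DPO/FUA flags and the group number are copied from the
 * original command.
 */
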
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field). For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_block_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media. So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}

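/*
 * In short: VERIFY with BYTCHK=1, and reads/writes on non-CD devices, take the
 * DMA path (scsi_block_dma_reqops) so they can use scatter/gather; everything
 * else is passed through unmodified via scsi_generic_req_ops.
 */
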
static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB. */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 5:
        /* 12-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 4:
        /* 16-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }

    /* Protection information is not supported. For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    r->req.status = &r->io_header.status;
    return scsi_disk_dma_command(req, buf);
}

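/*
 * The bytes captured above, CDB byte 1 (DPO/FUA and protection flags) and the
 * group number, are the only parts of the guest CDB that scsi_block_do_sgio()
 * cannot recompute when it re-encodes the LBA and transfer length for each
 * split segment.
 */
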
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}

static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf);
    }
}

static void scsi_block_update_sense(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
    r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
}

static BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                                   BlockCompletionFunc *cb, void *cb_opaque,
                                   void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};

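/*
 * scsi-disk-base is an abstract parent type; the concrete user-visible devices
 * registered below are scsi-hd, scsi-cd, scsi-block and the legacy scsi-disk,
 * which differ mainly in their realize hooks and property lists.
 */
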
#define DEFINE_SCSI_DISK_PROPERTIES()                                  \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),            \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),           \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),                 \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),               \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),               \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),             \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)

static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_hd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    dc->props = scsi_hd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};

static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_cd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    dc->props = scsi_cd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};

static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize = scsi_block_realize;
    sc->alloc_req = scsi_block_new_request;
    sc->parse_cdb = scsi_block_parse_cdb;
    sdc->dma_readv = scsi_block_dma_readv;
    sdc->dma_writev = scsi_block_dma_writev;
    sdc->update_sense = scsi_block_update_sense;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    dc->props = scsi_block_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};

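/*
 * Illustrative usage of scsi-block (exact spelling depends on the QEMU
 * version):
 *
 *   -blockdev node-name=sg0,driver=host_device,filename=/dev/sdb
 *   -device scsi-block,drive=sg0
 *
 * The "drive" property must point at a host block node that supports SG_IO,
 * as checked in scsi_block_realize().
 */
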
static Property scsi_disk_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_disk_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->fw_name = "disk";
    dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
    dc->reset = scsi_disk_reset;
    dc->props = scsi_disk_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_disk_info = {
    .name          = "scsi-disk",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_disk_class_initfn,
};

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
    type_register_static(&scsi_block_info);
    type_register_static(&scsi_disk_info);
}

type_init(scsi_disk_register_types)