#include <hw/hw.h>
#include <hw/pc.h>
#include <hw/pci.h>
+#include <hw/isa.h>
#include "qemu-error.h"
#include "qemu-timer.h"
#include "sysemu.h"
#include "dma.h"
#include "blockdev.h"
+#include "block_int.h"
#include <hw/ide/internal.h>
};
static int ide_handle_rw_error(IDEState *s, int error, int op);
+static void ide_dummy_transfer_stop(IDEState *s);
static void padstr(char *str, const char *src, int len)
{
{
uint16_t *p;
unsigned int oldsize;
- IDEDevice *dev;
+ IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
if (s->identify_set) {
memcpy(s->io_buffer, s->identify_data, sizeof(s->identify_data));
put_le16(p + 66, 120);
put_le16(p + 67, 120);
put_le16(p + 68, 120);
+ if (dev && dev->conf.discard_granularity) {
+ put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
+ }
if (s->ncq_queues) {
put_le16(p + 75, s->ncq_queues - 1);
put_le16(p + 101, s->nb_sectors >> 16);
put_le16(p + 102, s->nb_sectors >> 32);
put_le16(p + 103, s->nb_sectors >> 48);
- dev = s->unit ? s->bus->slave : s->bus->master;
+
if (dev && dev->conf.physical_block_size)
put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
+ if (dev && dev->conf.discard_granularity) {
+ put_le16(p + 169, 1); /* TRIM support */
+ }
memcpy(s->identify_data, p, sizeof(s->identify_data));
s->identify_set = 1;
}
}
+typedef struct TrimAIOCB {
+ BlockDriverAIOCB common;
+ QEMUBH *bh;
+ int ret;
+} TrimAIOCB;
+
+static void trim_aio_cancel(BlockDriverAIOCB *acb)
+{
+ TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
+
+ qemu_bh_delete(iocb->bh);
+ iocb->bh = NULL;
+ qemu_aio_release(iocb);
+}
+
+static AIOPool trim_aio_pool = {
+ .aiocb_size = sizeof(TrimAIOCB),
+ .cancel = trim_aio_cancel,
+};
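/*
 * Editor's note (not part of the patch): ide_issue_trim() below performs the
 * actual discards synchronously via bdrv_discard(), so completion is deferred
 * to a QEMU bottom half. That preserves the asynchronous BlockDriverAIOCB
 * contract expected by the DMA completion path -- the caller is always
 * completed from the BH, never re-entrantly -- and it lets trim_aio_cancel()
 * cancel the request simply by deleting the pending BH.
 */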
+
+static void ide_trim_bh_cb(void *opaque)
+{
+ TrimAIOCB *iocb = opaque;
+
+ iocb->common.cb(iocb->common.opaque, iocb->ret);
+
+ qemu_bh_delete(iocb->bh);
+ iocb->bh = NULL;
+
+ qemu_aio_release(iocb);
+}
+
+BlockDriverAIOCB *ide_issue_trim(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ TrimAIOCB *iocb;
+ int i, j, ret;
+
+ iocb = qemu_aio_get(&trim_aio_pool, bs, cb, opaque);
+ iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
+ iocb->ret = 0;
+
+ for (j = 0; j < qiov->niov; j++) {
+ uint64_t *buffer = qiov->iov[j].iov_base;
+
+ for (i = 0; i < qiov->iov[j].iov_len / 8; i++) {
+ /* 6-byte LBA + 2-byte range per entry */
+ uint64_t entry = le64_to_cpu(buffer[i]);
+ uint64_t sector = entry & 0x0000ffffffffffffULL;
+ uint16_t count = entry >> 48;
+
+ if (count == 0) {
+ break;
+ }
+
+ ret = bdrv_discard(bs, sector, count);
+ if (!iocb->ret) {
+ iocb->ret = ret;
+ }
+ }
+ }
+
+ qemu_bh_schedule(iocb->bh);
+
+ return &iocb->common;
+}
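/*
 * Illustration only, not part of the patch: a minimal sketch of how one
 * DATA SET MANAGEMENT / TRIM range entry is laid out in the buffer that
 * ide_issue_trim() parses above -- a 48-bit starting LBA in the low bits and
 * a 16-bit sector count in bits 48..63, stored little-endian. The helper
 * name is hypothetical.
 */
static inline uint64_t example_pack_trim_range(uint64_t lba, uint16_t count)
{
    uint64_t entry = (lba & 0x0000ffffffffffffULL) | ((uint64_t)count << 48);

    /* Entries are little-endian in the data buffer; le64_to_cpu() above
     * undoes this when the device parses them. */
    return cpu_to_le64(entry);
}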
+
static inline void ide_abort_command(IDEState *s)
{
s->status = READY_STAT | ERR_STAT;
#endif
if (n > s->req_nb_sectors)
n = s->req_nb_sectors;
+
+ bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
ret = bdrv_read(s->bs, sector_num, s->io_buffer, n);
+ bdrv_acct_done(s->bs, &s->acct);
if (ret != 0) {
if (ide_handle_rw_error(s, -ret,
BM_STATUS_PIO_RETRY | BM_STATUS_RETRY_READ))
s->error = ABRT_ERR;
s->status = READY_STAT | ERR_STAT;
ide_set_inactive(s);
- s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_INT);
ide_set_irq(s->bus);
}
if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
|| action == BLOCK_ERR_STOP_ANY) {
s->bus->dma->ops->set_unit(s->bus->dma, s->unit);
- s->bus->dma->ops->add_status(s->bus->dma, op);
+ s->bus->error_status = op;
bdrv_mon_event(s->bs, BDRV_ACTION_STOP, is_read);
- vm_stop(VMSTOP_DISKFULL);
+ vm_stop(RUN_STATE_IO_ERROR);
+ bdrv_iostatus_set_err(s->bs, error);
} else {
if (op & BM_STATUS_DMA_RETRY) {
dma_buf_commit(s, 0);
if (ret < 0) {
int op = BM_STATUS_DMA_RETRY;
- if (s->is_read)
+ if (s->dma_cmd == IDE_DMA_READ)
op |= BM_STATUS_RETRY_READ;
+ else if (s->dma_cmd == IDE_DMA_TRIM)
+ op |= BM_STATUS_RETRY_TRIM;
+
if (ide_handle_rw_error(s, -ret, op)) {
return;
}
n = s->io_buffer_size >> 9;
sector_num = ide_get_sector(s);
if (n > 0) {
- dma_buf_commit(s, s->is_read);
+ dma_buf_commit(s, ide_cmd_is_read(s));
sector_num += n;
ide_set_sector(s, sector_num);
s->nsector -= n;
n = s->nsector;
s->io_buffer_index = 0;
s->io_buffer_size = n * 512;
- if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->is_read) == 0)
+ if (s->bus->dma->ops->prepare_buf(s->bus->dma, ide_cmd_is_read(s)) == 0) {
+ /* The PRDs were too short. Reset the Active bit, but don't raise an
+ * interrupt. */
goto eot;
+ }
#ifdef DEBUG_AIO
- printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, is_read=%d\n",
- sector_num, n, s->is_read);
+ printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n",
+ sector_num, n, s->dma_cmd);
#endif
- if (s->is_read) {
+ switch (s->dma_cmd) {
+ case IDE_DMA_READ:
s->bus->dma->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
ide_dma_cb, s);
- } else {
+ break;
+ case IDE_DMA_WRITE:
s->bus->dma->aiocb = dma_bdrv_write(s->bs, &s->sg, sector_num,
ide_dma_cb, s);
+ break;
+ case IDE_DMA_TRIM:
+ s->bus->dma->aiocb = dma_bdrv_io(s->bs, &s->sg, sector_num,
+ ide_issue_trim, ide_dma_cb, s, true);
+ break;
}
if (!s->bus->dma->aiocb) {
return;
eot:
- s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_INT);
- ide_set_inactive(s);
+ if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
+ bdrv_acct_done(s->bs, &s->acct);
+ }
+ ide_set_inactive(s);
}
-static void ide_sector_start_dma(IDEState *s, int is_read)
+static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
s->io_buffer_index = 0;
s->io_buffer_size = 0;
- s->is_read = is_read;
+ s->dma_cmd = dma_cmd;
+
+ switch (dma_cmd) {
+ case IDE_DMA_READ:
+ bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
+ BDRV_ACCT_READ);
+ break;
+ case IDE_DMA_WRITE:
+ bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
+ BDRV_ACCT_WRITE);
+ break;
+ default:
+ break;
+ }
+
s->bus->dma->ops->start_dma(s->bus->dma, s, ide_dma_cb);
}
n = s->nsector;
if (n > s->req_nb_sectors)
n = s->req_nb_sectors;
+
+ bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
ret = bdrv_write(s->bs, sector_num, s->io_buffer, n);
+ bdrv_acct_done(s->bs, &s->acct);
if (ret != 0) {
if (ide_handle_rw_error(s, -ret, BM_STATUS_PIO_RETRY))
}
}
+ bdrv_acct_done(s->bs, &s->acct);
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
}
return;
}
+ bdrv_acct_start(s->bs, &s->acct, 0, BDRV_ACCT_FLUSH);
acb = bdrv_aio_flush(s->bs, ide_flush_cb, s);
if (acb == NULL) {
ide_flush_cb(s, -EIO);
}
/* called when the inserted state of the media has changed */
-static void cdrom_change_cb(void *opaque, int reason)
+static void ide_cd_change_cb(void *opaque, bool load)
{
IDEState *s = opaque;
uint64_t nb_sectors;
- if (!(reason & CHANGE_MEDIA)) {
- return;
- }
-
+ s->tray_open = !load;
bdrv_get_geometry(s->bs, &nb_sectors);
s->nb_sectors = nb_sectors;
* First indicate to the guest that a CD has been removed. That's
* done on the next command the guest sends us.
*
- * Then we set SENSE_UNIT_ATTENTION, by which the guest will
+ * Then we set UNIT_ATTENTION, by which the guest will
* detect a new CD in the drive. See ide_atapi_cmd() for details.
*/
s->cdrom_changed = 1;
}
}
+#define HD_OK (1u << IDE_HD)
+#define CD_OK (1u << IDE_CD)
+#define CFA_OK (1u << IDE_CFATA)
+#define HD_CFA_OK (HD_OK | CFA_OK)
+#define ALL_OK (HD_OK | CD_OK | CFA_OK)
+
+/* See ACS-2 T13/2015-D Table B.2 Command codes */
+static const uint8_t ide_cmd_table[0x100] = {
+ /* NOP not implemented, mandatory for CD */
+ [CFA_REQ_EXT_ERROR_CODE] = CFA_OK,
+ [WIN_DSM] = ALL_OK,
+ [WIN_DEVICE_RESET] = CD_OK,
+ [WIN_RECAL] = HD_CFA_OK,
+ [WIN_READ] = ALL_OK,
+ [WIN_READ_ONCE] = ALL_OK,
+ [WIN_READ_EXT] = HD_CFA_OK,
+ [WIN_READDMA_EXT] = HD_CFA_OK,
+ [WIN_READ_NATIVE_MAX_EXT] = HD_CFA_OK,
+ [WIN_MULTREAD_EXT] = HD_CFA_OK,
+ [WIN_WRITE] = HD_CFA_OK,
+ [WIN_WRITE_ONCE] = HD_CFA_OK,
+ [WIN_WRITE_EXT] = HD_CFA_OK,
+ [WIN_WRITEDMA_EXT] = HD_CFA_OK,
+ [CFA_WRITE_SECT_WO_ERASE] = CFA_OK,
+ [WIN_MULTWRITE_EXT] = HD_CFA_OK,
+ [WIN_WRITE_VERIFY] = HD_CFA_OK,
+ [WIN_VERIFY] = HD_CFA_OK,
+ [WIN_VERIFY_ONCE] = HD_CFA_OK,
+ [WIN_VERIFY_EXT] = HD_CFA_OK,
+ [WIN_SEEK] = HD_CFA_OK,
+ [CFA_TRANSLATE_SECTOR] = CFA_OK,
+ [WIN_DIAGNOSE] = ALL_OK,
+ [WIN_SPECIFY] = HD_CFA_OK,
+ [WIN_STANDBYNOW2] = ALL_OK,
+ [WIN_IDLEIMMEDIATE2] = ALL_OK,
+ [WIN_STANDBY2] = ALL_OK,
+ [WIN_SETIDLE2] = ALL_OK,
+ [WIN_CHECKPOWERMODE2] = ALL_OK,
+ [WIN_SLEEPNOW2] = ALL_OK,
+ [WIN_PACKETCMD] = CD_OK,
+ [WIN_PIDENTIFY] = CD_OK,
+ [WIN_SMART] = HD_CFA_OK,
+ [CFA_ACCESS_METADATA_STORAGE] = CFA_OK,
+ [CFA_ERASE_SECTORS] = CFA_OK,
+ [WIN_MULTREAD] = HD_CFA_OK,
+ [WIN_MULTWRITE] = HD_CFA_OK,
+ [WIN_SETMULT] = HD_CFA_OK,
+ [WIN_READDMA] = HD_CFA_OK,
+ [WIN_READDMA_ONCE] = HD_CFA_OK,
+ [WIN_WRITEDMA] = HD_CFA_OK,
+ [WIN_WRITEDMA_ONCE] = HD_CFA_OK,
+ [CFA_WRITE_MULTI_WO_ERASE] = CFA_OK,
+ [WIN_STANDBYNOW1] = ALL_OK,
+ [WIN_IDLEIMMEDIATE] = ALL_OK,
+ [WIN_STANDBY] = ALL_OK,
+ [WIN_SETIDLE1] = ALL_OK,
+ [WIN_CHECKPOWERMODE1] = ALL_OK,
+ [WIN_SLEEPNOW1] = ALL_OK,
+ [WIN_FLUSH_CACHE] = ALL_OK,
+ [WIN_FLUSH_CACHE_EXT] = HD_CFA_OK,
+ [WIN_IDENTIFY] = ALL_OK,
+ [WIN_SETFEATURES] = ALL_OK,
+ [IBM_SENSE_CONDITION] = CFA_OK,
+ [CFA_WEAR_LEVEL] = CFA_OK,
+ [WIN_READ_NATIVE_MAX] = ALL_OK,
+};
+
+static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
+{
+ return cmd < ARRAY_SIZE(ide_cmd_table)
+ && (ide_cmd_table[cmd] & (1u << s->drive_kind));
+}
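/*
 * Example (illustration only): WIN_PIDENTIFY is marked CD_OK in the table
 * above, so for a hard disk (drive_kind == IDE_HD) the lookup
 *
 *     ide_cmd_table[WIN_PIDENTIFY] & (1u << IDE_HD)
 *
 * evaluates to 0 and ide_exec_cmd() aborts the command before entering the
 * switch. This central gate is what allows the per-command
 * "if (s->drive_kind != ...)" checks to be dropped further down.
 */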
void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET)
return;
+ if (!ide_cmd_permitted(s, val)) {
+ goto abort_cmd;
+ }
+
switch(val) {
+ case WIN_DSM:
+ switch (s->feature) {
+ case DSM_TRIM:
+ if (!s->bs) {
+ goto abort_cmd;
+ }
+ ide_sector_start_dma(s, IDE_DMA_TRIM);
+ break;
+ default:
+ goto abort_cmd;
+ }
+ break;
case WIN_IDENTIFY:
if (s->bs && s->drive_kind != IDE_CD) {
if (s->drive_kind != IDE_CFATA)
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
break;
- case WIN_READ_EXT:
+ case WIN_READ_EXT:
lba48 = 1;
case WIN_READ:
case WIN_READ_ONCE:
- if (!s->bs)
+ if (s->drive_kind == IDE_CD) {
+ ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
goto abort_cmd;
+ }
ide_cmd_lba48_transform(s, lba48);
s->req_nb_sectors = 1;
ide_sector_read(s);
break;
- case WIN_WRITE_EXT:
+ case WIN_WRITE_EXT:
lba48 = 1;
case WIN_WRITE:
case WIN_WRITE_ONCE:
ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
s->media_changed = 1;
break;
- case WIN_MULTREAD_EXT:
+ case WIN_MULTREAD_EXT:
lba48 = 1;
case WIN_MULTREAD:
if (!s->mult_sectors)
ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
s->media_changed = 1;
break;
- case WIN_READDMA_EXT:
+ case WIN_READDMA_EXT:
lba48 = 1;
case WIN_READDMA:
case WIN_READDMA_ONCE:
if (!s->bs)
goto abort_cmd;
ide_cmd_lba48_transform(s, lba48);
- ide_sector_start_dma(s, 1);
+ ide_sector_start_dma(s, IDE_DMA_READ);
break;
- case WIN_WRITEDMA_EXT:
+ case WIN_WRITEDMA_EXT:
lba48 = 1;
case WIN_WRITEDMA:
case WIN_WRITEDMA_ONCE:
if (!s->bs)
goto abort_cmd;
ide_cmd_lba48_transform(s, lba48);
- ide_sector_start_dma(s, 0);
+ ide_sector_start_dma(s, IDE_DMA_WRITE);
s->media_changed = 1;
break;
case WIN_READ_NATIVE_MAX_EXT:
case WIN_STANDBYNOW1:
case WIN_STANDBYNOW2:
case WIN_IDLEIMMEDIATE:
- case CFA_IDLEIMMEDIATE:
+ case WIN_IDLEIMMEDIATE2:
case WIN_SETIDLE1:
case WIN_SETIDLE2:
case WIN_SLEEPNOW1:
ide_set_irq(s->bus);
break;
case WIN_SEEK:
- if(s->drive_kind == IDE_CD)
- goto abort_cmd;
/* XXX: Check that seek is within bounds */
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
break;
/* ATAPI commands */
case WIN_PIDENTIFY:
- if (s->drive_kind == IDE_CD) {
- ide_atapi_identify(s);
- s->status = READY_STAT | SEEK_STAT;
- ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
- } else {
- ide_abort_command(s);
- }
+ ide_atapi_identify(s);
+ s->status = READY_STAT | SEEK_STAT;
+ ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
ide_set_irq(s->bus);
break;
case WIN_DIAGNOSE:
*/
ide_set_irq(s->bus);
break;
- case WIN_SRST:
- if (s->drive_kind != IDE_CD)
- goto abort_cmd;
+ case WIN_DEVICE_RESET:
ide_set_signature(s);
s->status = 0x00; /* NOTE: READY is _not_ set */
s->error = 0x01;
break;
case WIN_PACKETCMD:
- if (s->drive_kind != IDE_CD)
- goto abort_cmd;
/* overlapping commands not supported */
if (s->feature & 0x02)
goto abort_cmd;
break;
/* CF-ATA commands */
case CFA_REQ_EXT_ERROR_CODE:
- if (s->drive_kind != IDE_CFATA)
- goto abort_cmd;
s->error = 0x09; /* miscellaneous error */
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
break;
case CFA_ERASE_SECTORS:
case CFA_WEAR_LEVEL:
- if (s->drive_kind != IDE_CFATA)
- goto abort_cmd;
if (val == CFA_WEAR_LEVEL)
s->nsector = 0;
if (val == CFA_ERASE_SECTORS)
ide_set_irq(s->bus);
break;
case CFA_TRANSLATE_SECTOR:
- if (s->drive_kind != IDE_CFATA)
- goto abort_cmd;
s->error = 0x00;
s->status = READY_STAT | SEEK_STAT;
memset(s->io_buffer, 0, 0x200);
ide_set_irq(s->bus);
break;
case CFA_ACCESS_METADATA_STORAGE:
- if (s->drive_kind != IDE_CFATA)
- goto abort_cmd;
switch (s->feature) {
case 0x02: /* Inquiry Metadata Storage */
ide_cfata_metadata_inquiry(s);
ide_set_irq(s->bus);
break;
case IBM_SENSE_CONDITION:
- if (s->drive_kind != IDE_CFATA)
- goto abort_cmd;
switch (s->feature) {
case 0x01: /* sense temperature in device */
s->nsector = 0x50; /* +20 C */
ide_set_irq(s->bus);
break;
- case WIN_SMART:
- if (s->drive_kind == IDE_CD)
- goto abort_cmd;
+ case WIN_SMART:
if (s->hcyl != 0xc2 || s->lcyl != 0x4f)
goto abort_cmd;
if (!s->smart_enabled && s->feature != SMART_ENABLE)
}
break;
default:
+ /* should not be reachable */
abort_cmd:
ide_abort_command(s);
ide_set_irq(s->bus);
bus->cmd = val;
}
+/*
+ * Returns true if the running PIO transfer is a PIO out (i.e. data is
+ * transferred from the device to the guest), false if it's a PIO in
+ */
+static bool ide_is_pio_out(IDEState *s)
+{
+ if (s->end_transfer_func == ide_sector_write ||
+ s->end_transfer_func == ide_atapi_cmd) {
+ return false;
+ } else if (s->end_transfer_func == ide_sector_read ||
+ s->end_transfer_func == ide_transfer_stop ||
+ s->end_transfer_func == ide_atapi_cmd_reply_end ||
+ s->end_transfer_func == ide_dummy_transfer_stop) {
+ return true;
+ }
+
+ abort();
+}
+
void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
{
IDEBus *bus = opaque;
IDEState *s = idebus_active_if(bus);
uint8_t *p;
- /* PIO data access allowed only when DRQ bit is set */
- if (!(s->status & DRQ_STAT))
+ /* PIO data access allowed only when DRQ bit is set. The result of a write
+ * during PIO out is indeterminate, just ignore it. */
+ if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
return;
+ }
p = s->data_ptr;
*(uint16_t *)p = le16_to_cpu(val);
uint8_t *p;
int ret;
- /* PIO data access allowed only when DRQ bit is set */
- if (!(s->status & DRQ_STAT))
+ /* PIO data access allowed only when DRQ bit is set. The result of a read
+ * during PIO in is indeterminate, return 0 and don't move forward. */
+ if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
return 0;
+ }
p = s->data_ptr;
ret = cpu_to_le16(*(uint16_t *)p);
IDEState *s = idebus_active_if(bus);
uint8_t *p;
- /* PIO data access allowed only when DRQ bit is set */
- if (!(s->status & DRQ_STAT))
+ /* PIO data access allowed only when DRQ bit is set. The result of a write
+ * during PIO out is indeterminate, just ignore it. */
+ if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
return;
+ }
p = s->data_ptr;
*(uint32_t *)p = le32_to_cpu(val);
uint8_t *p;
int ret;
- /* PIO data access allowed only when DRQ bit is set */
- if (!(s->status & DRQ_STAT))
+ /* PIO data access allowed only when DRQ bit is set. The result of a read
+ * during PIO in is indeterminate, return 0 and don't move forward. */
+ if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
return 0;
+ }
p = s->data_ptr;
ret = cpu_to_le32(*(uint32_t *)p);
bus->dma->ops->reset(bus->dma);
}
-int ide_init_drive(IDEState *s, BlockDriverState *bs,
+static bool ide_cd_is_tray_open(void *opaque)
+{
+ return ((IDEState *)opaque)->tray_open;
+}
+
+static bool ide_cd_is_medium_locked(void *opaque)
+{
+ return ((IDEState *)opaque)->tray_locked;
+}
+
+static const BlockDevOps ide_cd_block_ops = {
+ .change_media_cb = ide_cd_change_cb,
+ .is_tray_open = ide_cd_is_tray_open,
+ .is_medium_locked = ide_cd_is_medium_locked,
+};
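/*
 * Editor's note (not part of the patch): these BlockDevOps hooks replace the
 * bdrv_set_change_cb() registration removed below in ide_init_drive(). The
 * block layer reports media changes through ide_cd_change_cb() and can query
 * tray and medium-lock state on demand via the two is_* callbacks.
 */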
+
+int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind,
const char *version, const char *serial)
{
int cylinders, heads, secs;
uint64_t nb_sectors;
s->bs = bs;
+ s->drive_kind = kind;
+
bdrv_get_geometry(bs, &nb_sectors);
bdrv_guess_geometry(bs, &cylinders, &heads, &secs);
if (cylinders < 1 || cylinders > 16383) {
s->smart_autosave = 1;
s->smart_errors = 0;
s->smart_selftest_count = 0;
- if (bdrv_get_type_hint(bs) == BDRV_TYPE_CDROM) {
- s->drive_kind = IDE_CD;
- bdrv_set_change_cb(bs, cdrom_change_cb, s);
- bs->buffer_alignment = 2048;
+ if (kind == IDE_CD) {
+ bdrv_set_dev_ops(bs, &ide_cd_block_ops, s);
+ bdrv_set_buffer_alignment(bs, 2048);
} else {
if (!bdrv_is_inserted(s->bs)) {
error_report("Device needs media, but drive is empty");
}
ide_reset(s);
- bdrv_set_removable(bs, s->drive_kind == IDE_CD);
+ bdrv_iostatus_enable(bs);
return 0;
}
s->unit = unit;
s->drive_serial = drive_serial++;
/* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
- s->io_buffer = qemu_memalign(2048, IDE_DMA_BUF_SECTORS*512 + 4);
s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
+ s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
+ memset(s->io_buffer, 0, s->io_buffer_total_len);
+
s->smart_selftest_data = qemu_blockalign(s->bs, 512);
+ memset(s->smart_selftest_data, 0, 512);
+
s->sector_write_timer = qemu_new_timer_ns(vm_clock,
ide_sector_write_timer_cb, s);
}
return 0;
}
-static void ide_nop_restart(void *opaque, int x, int y)
+static void ide_nop_restart(void *opaque, int x, RunState y)
{
}
dinfo = i == 0 ? hd0 : hd1;
ide_init1(bus, i);
if (dinfo) {
- if (ide_init_drive(&bus->ifs[i], dinfo->bdrv, NULL,
+ if (ide_init_drive(&bus->ifs[i], dinfo->bdrv,
+ dinfo->media_cd ? IDE_CD : IDE_HD, NULL,
*dinfo->serial ? dinfo->serial : NULL) < 0) {
error_report("Can't set up IDE drive %s", dinfo->id);
exit(1);
}
+ bdrv_attach_dev_nofail(dinfo->bdrv, &bus->ifs[i]);
} else {
ide_reset(&bus->ifs[i]);
}
bus->dma = &ide_dma_nop;
}
-void ide_init_ioport(IDEBus *bus, int iobase, int iobase2)
+static const MemoryRegionPortio ide_portio_list[] = {
+ { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
+ { 0, 2, 2, .read = ide_data_readw, .write = ide_data_writew },
+ { 0, 4, 4, .read = ide_data_readl, .write = ide_data_writel },
+ PORTIO_END_OF_LIST(),
+};
+
+static const MemoryRegionPortio ide_portio2_list[] = {
+ { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
+ PORTIO_END_OF_LIST(),
+};
+
+void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
{
- register_ioport_write(iobase, 8, 1, ide_ioport_write, bus);
- register_ioport_read(iobase, 8, 1, ide_ioport_read, bus);
+ /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
+ bridge has been setup properly to always register with ISA. */
+ isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
+
if (iobase2) {
- register_ioport_read(iobase2, 1, 1, ide_status_read, bus);
- register_ioport_write(iobase2, 1, 1, ide_cmd_write, bus);
+ isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
}
-
- /* data ports */
- register_ioport_write(iobase, 2, 2, ide_data_writew, bus);
- register_ioport_read(iobase, 2, 2, ide_data_readw, bus);
- register_ioport_write(iobase, 4, 4, ide_data_writel, bus);
- register_ioport_read(iobase, 4, 4, ide_data_readl, bus);
}
static bool is_identify_set(void *opaque, int version_id)
IDEState *s = opaque;
if (version_id < 3) {
- if (s->sense_key == SENSE_UNIT_ATTENTION &&
+ if (s->sense_key == UNIT_ATTENTION &&
s->asc == ASC_MEDIUM_MAY_HAVE_CHANGED) {
s->cdrom_changed = 1;
}
{
IDEState *s = opaque;
- if (s->end_transfer_fn_idx > ARRAY_SIZE(transfer_end_table)) {
+ if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
return -EINVAL;
}
s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
{
IDEState *s = opaque;
- return (s->status & DRQ_STAT) != 0;
+ return ((s->status & DRQ_STAT) != 0)
+ || (s->bus->error_status & BM_STATUS_PIO_RETRY);
+}
+
+static int ide_tray_state_post_load(void *opaque, int version_id)
+{
+ IDEState *s = opaque;
+
+ bdrv_eject(s->bs, s->tray_open);
+ bdrv_lock_medium(s->bs, s->tray_locked);
+ return 0;
+}
+
+static bool ide_tray_state_needed(void *opaque)
+{
+ IDEState *s = opaque;
+
+ return s->tray_open || s->tray_locked;
}
static bool ide_atapi_gesn_needed(void *opaque)
return s->events.new_media || s->events.eject_request;
}
+static bool ide_error_needed(void *opaque)
+{
+ IDEBus *bus = opaque;
+
+ return (bus->error_status != 0);
+}
+
/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
-const VMStateDescription vmstate_ide_atapi_gesn_state = {
+static const VMStateDescription vmstate_ide_atapi_gesn_state = {
.name ="ide_drive/atapi/gesn_state",
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField []) {
VMSTATE_BOOL(events.new_media, IDEState),
VMSTATE_BOOL(events.eject_request, IDEState),
+ VMSTATE_END_OF_LIST()
}
};
-const VMStateDescription vmstate_ide_drive_pio_state = {
+static const VMStateDescription vmstate_ide_tray_state = {
+ .name = "ide_drive/tray_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .post_load = ide_tray_state_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(tray_open, IDEState),
+ VMSTATE_BOOL(tray_locked, IDEState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_ide_drive_pio_state = {
.name = "ide_drive/pio_state",
.version_id = 1,
.minimum_version_id = 1,
{
.vmsd = &vmstate_ide_drive_pio_state,
.needed = ide_drive_pio_state_needed,
+ }, {
+ .vmsd = &vmstate_ide_tray_state,
+ .needed = ide_tray_state_needed,
}, {
.vmsd = &vmstate_ide_atapi_gesn_state,
.needed = ide_atapi_gesn_needed,
}
};
+static const VMStateDescription vmstate_ide_error_status = {
+ .name ="ide_bus/error",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField []) {
+ VMSTATE_INT32(error_status, IDEBus),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_ide_bus = {
.name = "ide_bus",
.version_id = 1,
VMSTATE_UINT8(cmd, IDEBus),
VMSTATE_UINT8(unit, IDEBus),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (VMStateSubsection []) {
+ {
+ .vmsd = &vmstate_ide_error_status,
+ .needed = ide_error_needed,
+ }, {
+ /* empty */
+ }
}
};