*p = cpu_to_le16(v);
}
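+/* Fill in the capacity words of the cached IDENTIFY data from s->nb_sectors:
+ * words 60-61 (28-bit sector count) and words 100-103 (48-bit sector count). */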
+static void ide_identify_size(IDEState *s)
+{
+ uint16_t *p = (uint16_t *)s->identify_data;
+ put_le16(p + 60, s->nb_sectors);
+ put_le16(p + 61, s->nb_sectors >> 16);
+ put_le16(p + 100, s->nb_sectors);
+ put_le16(p + 101, s->nb_sectors >> 16);
+ put_le16(p + 102, s->nb_sectors >> 32);
+ put_le16(p + 103, s->nb_sectors >> 48);
+}
+
static void ide_identify(IDEState *s)
{
uint16_t *p;
unsigned int oldsize;
IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
+ p = (uint16_t *)s->identify_data;
if (s->identify_set) {
- memcpy(s->io_buffer, s->identify_data, sizeof(s->identify_data));
- return;
+ goto fill_buffer;
}
+ memset(p, 0, sizeof(s->identify_data));
- memset(s->io_buffer, 0, 512);
- p = (uint16_t *)s->io_buffer;
put_le16(p + 0, 0x0040);
put_le16(p + 1, s->cylinders);
put_le16(p + 3, s->heads);
put_le16(p + 58, oldsize >> 16);
if (s->mult_sectors)
put_le16(p + 59, 0x100 | s->mult_sectors);
- put_le16(p + 60, s->nb_sectors);
- put_le16(p + 61, s->nb_sectors >> 16);
+ /* words 60-61 (28-bit sector count) are set by ide_identify_size() */
put_le16(p + 62, 0x07); /* single word dma0-2 supported */
put_le16(p + 63, 0x07); /* mdma0-2 supported */
put_le16(p + 64, 0x03); /* pio3-4 supported */
}
put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
put_le16(p + 93, 1 | (1 << 14) | 0x2000);
- put_le16(p + 100, s->nb_sectors);
- put_le16(p + 101, s->nb_sectors >> 16);
- put_le16(p + 102, s->nb_sectors >> 32);
- put_le16(p + 103, s->nb_sectors >> 48);
+ /* words 100-103 (48-bit sector count) are set by ide_identify_size() */
if (dev && dev->conf.physical_block_size)
put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
put_le16(p + 169, 1); /* TRIM support */
}
- memcpy(s->identify_data, p, sizeof(s->identify_data));
+ ide_identify_size(s);
s->identify_set = 1;
+
+fill_buffer:
+ memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
static void ide_atapi_identify(IDEState *s)
{
uint16_t *p;
+ p = (uint16_t *)s->identify_data;
if (s->identify_set) {
- memcpy(s->io_buffer, s->identify_data, sizeof(s->identify_data));
- return;
+ goto fill_buffer;
}
+ memset(p, 0, sizeof(s->identify_data));
- memset(s->io_buffer, 0, 512);
- p = (uint16_t *)s->io_buffer;
/* Removable CDROM, 50us response, 12 byte packets */
put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
}
put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
+ if (s->wwn) {
+ put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
+ put_le16(p + 87, (1 << 8)); /* WWN enabled */
+ }
+
#ifdef USE_DMA_CDROM
put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif
- memcpy(s->identify_data, p, sizeof(s->identify_data));
+
+ if (s->wwn) {
+ /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
+ put_le16(p + 108, s->wwn >> 48);
+ put_le16(p + 109, s->wwn >> 32);
+ put_le16(p + 110, s->wwn >> 16);
+ put_le16(p + 111, s->wwn);
+ }
+
s->identify_set = 1;
+
+fill_buffer:
+ memcpy(s->io_buffer, p, sizeof(s->identify_data));
+}
+
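+/* Fill in the capacity words of the cached CompactFlash IDENTIFY data from
+ * s->nb_sectors: words 7-8 (sectors per card) and 60-61 (total LBA sectors). */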
+static void ide_cfata_identify_size(IDEState *s)
+{
+ uint16_t *p = (uint16_t *)s->identify_data;
+ put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
+ put_le16(p + 8, s->nb_sectors); /* Sectors per card */
+ put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
+ put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
}
static void ide_cfata_identify(IDEState *s)
uint16_t *p;
uint32_t cur_sec;
- p = (uint16_t *) s->identify_data;
- if (s->identify_set)
+ p = (uint16_t *)s->identify_data;
+ if (s->identify_set) {
goto fill_buffer;
-
+ }
memset(p, 0, sizeof(s->identify_data));
cur_sec = s->cylinders * s->heads * s->sectors;
put_le16(p + 1, s->cylinders); /* Default cylinders */
put_le16(p + 3, s->heads); /* Default heads */
put_le16(p + 6, s->sectors); /* Default sectors per track */
- put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
- put_le16(p + 8, s->nb_sectors); /* Sectors per card */
+ /* words 7-8 (sectors per card) are set by ide_cfata_identify_size() */
padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
put_le16(p + 22, 0x0004); /* ECC bytes */
padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
put_le16(p + 58, cur_sec >> 16); /* Current capacity */
if (s->mult_sectors) /* Multiple sector setting */
put_le16(p + 59, 0x100 | s->mult_sectors);
- put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
- put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
+ /* words 60-61 (total LBA sectors) are set by ide_cfata_identify_size() */
put_le16(p + 63, 0x0203); /* Multiword DMA capability */
put_le16(p + 64, 0x0001); /* Flow Control PIO support */
put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
put_le16(p + 160, 0x8100); /* Power requirement */
put_le16(p + 161, 0x8001); /* CF command set */
+ ide_cfata_identify_size(s);
s->identify_set = 1;
fill_buffer:
{
TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
- /* Exit the loop in case bdrv_aio_cancel calls ide_issue_trim_cb again. */
+ /* Exit the loop so ide_issue_trim_cb will not continue. */
iocb->j = iocb->qiov->niov - 1;
iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
- /* Tell ide_issue_trim_cb not to trigger the completion, too. */
- qemu_bh_delete(iocb->bh);
- iocb->bh = NULL;
+ iocb->ret = -ECANCELED;
if (iocb->aiocb) {
- bdrv_aio_cancel(iocb->aiocb);
+ bdrv_aio_cancel_async(iocb->aiocb);
+ iocb->aiocb = NULL;
}
- qemu_aio_release(iocb);
}
static const AIOCBInfo trim_aiocb_info = {
.aiocb_size = sizeof(TrimAIOCB),
- .cancel = trim_aio_cancel,
+ .cancel_async = trim_aio_cancel,
};
static void ide_trim_bh_cb(void *opaque)
qemu_bh_delete(iocb->bh);
iocb->bh = NULL;
- qemu_aio_release(iocb);
+ qemu_aio_unref(iocb);
}
static void ide_issue_trim_cb(void *opaque, int ret)
static inline void ide_abort_command(IDEState *s)
{
+ ide_transfer_stop(s);
s->status = READY_STAT | ERR_STAT;
s->error = ABRT_ERR;
}
if (!(s->status & ERR_STAT)) {
s->status |= DRQ_STAT;
}
- s->bus->dma->ops->start_transfer(s->bus->dma);
+ if (s->bus->dma->ops->start_transfer) {
+ s->bus->dma->ops->start_transfer(s->bus->dma);
+ }
+}
+
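+/* Notify the DMA provider, if any, that the current command has completed. */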
+static void ide_cmd_done(IDEState *s)
+{
+ if (s->bus->dma->ops->cmd_done) {
+ s->bus->dma->ops->cmd_done(s->bus->dma);
+ }
}
void ide_transfer_stop(IDEState *s)
s->data_ptr = s->io_buffer;
s->data_end = s->io_buffer;
s->status &= ~DRQ_STAT;
+ ide_cmd_done(s);
}
int64_t ide_get_sector(IDEState *s)
ide_set_irq(s->bus);
}
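+/* Return true if [sector, sector + nb_sectors) fits on the backing device. */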
+static bool ide_sect_range_ok(IDEState *s,
+ uint64_t sector, uint64_t nb_sectors)
+{
+ uint64_t total_sectors;
+
+ bdrv_get_geometry(s->bs, &total_sectors);
+ if (sector > total_sectors || nb_sectors > total_sectors - sector) {
+ return false;
+ }
+ return true;
+}
+
static void ide_sector_read_cb(void *opaque, int ret)
{
IDEState *s = opaque;
s->pio_aiocb = NULL;
s->status &= ~BUSY_STAT;
- bdrv_acct_done(s->bs, &s->acct);
+ if (ret == -ECANCELED) {
+ return;
+ }
+ block_acct_done(bdrv_get_stats(s->bs), &s->acct);
if (ret != 0) {
- if (ide_handle_rw_error(s, -ret, BM_STATUS_PIO_RETRY |
- BM_STATUS_RETRY_READ)) {
+ if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
+ IDE_RETRY_READ)) {
return;
}
}
printf("sector=%" PRId64 "\n", sector_num);
#endif
+ if (!ide_sect_range_ok(s, sector_num, n)) {
+ ide_rw_error(s);
+ return;
+ }
+
s->iov.iov_base = s->io_buffer;
s->iov.iov_len = n * BDRV_SECTOR_SIZE;
qemu_iovec_init_external(&s->qiov, &s->iov, 1);
- bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
+ block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+ n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
s->pio_aiocb = bdrv_aio_readv(s->bs, sector_num, &s->qiov, n,
ide_sector_read_cb, s);
}
qemu_sglist_destroy(&s->sg);
}
-void ide_set_inactive(IDEState *s)
+void ide_set_inactive(IDEState *s, bool more)
{
s->bus->dma->aiocb = NULL;
- s->bus->dma->ops->set_inactive(s->bus->dma);
+ if (s->bus->dma->ops->set_inactive) {
+ s->bus->dma->ops->set_inactive(s->bus->dma, more);
+ }
+ ide_cmd_done(s);
}
void ide_dma_error(IDEState *s)
{
- ide_transfer_stop(s);
- s->error = ABRT_ERR;
- s->status = READY_STAT | ERR_STAT;
- ide_set_inactive(s);
+ ide_abort_command(s);
+ ide_set_inactive(s, false);
ide_set_irq(s->bus);
}
static int ide_handle_rw_error(IDEState *s, int error, int op)
{
- bool is_read = (op & BM_STATUS_RETRY_READ) != 0;
+ bool is_read = (op & IDE_RETRY_READ) != 0;
BlockErrorAction action = bdrv_get_error_action(s->bs, is_read, error);
- if (action == BDRV_ACTION_STOP) {
+ if (action == BLOCK_ERROR_ACTION_STOP) {
s->bus->dma->ops->set_unit(s->bus->dma, s->unit);
s->bus->error_status = op;
- } else if (action == BDRV_ACTION_REPORT) {
- if (op & BM_STATUS_DMA_RETRY) {
+ } else if (action == BLOCK_ERROR_ACTION_REPORT) {
+ if (op & IDE_RETRY_DMA) {
dma_buf_commit(s);
ide_dma_error(s);
} else {
}
}
bdrv_error_action(s->bs, action, is_read, error);
- return action != BDRV_ACTION_IGNORE;
+ return action != BLOCK_ERROR_ACTION_IGNORE;
}
void ide_dma_cb(void *opaque, int ret)
int64_t sector_num;
bool stay_active = false;
+ if (ret == -ECANCELED) {
+ return;
+ }
if (ret < 0) {
- int op = BM_STATUS_DMA_RETRY;
+ int op = IDE_RETRY_DMA;
if (s->dma_cmd == IDE_DMA_READ)
- op |= BM_STATUS_RETRY_READ;
+ op |= IDE_RETRY_READ;
else if (s->dma_cmd == IDE_DMA_TRIM)
- op |= BM_STATUS_RETRY_TRIM;
+ op |= IDE_RETRY_TRIM;
if (ide_handle_rw_error(s, -ret, op)) {
return;
sector_num, n, s->dma_cmd);
#endif
+ if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
+ !ide_sect_range_ok(s, sector_num, n)) {
+ dma_buf_commit(s);
+ ide_dma_error(s);
+ return;
+ }
+
switch (s->dma_cmd) {
case IDE_DMA_READ:
s->bus->dma->aiocb = dma_bdrv_read(s->bs, &s->sg, sector_num,
eot:
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
- bdrv_acct_done(s->bs, &s->acct);
- }
- ide_set_inactive(s);
- if (stay_active) {
- s->bus->dma->ops->add_status(s->bus->dma, BM_STATUS_DMAING);
+ block_acct_done(bdrv_get_stats(s->bs), &s->acct);
}
+ ide_set_inactive(s, stay_active);
}
static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
switch (dma_cmd) {
case IDE_DMA_READ:
- bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
- BDRV_ACCT_READ);
+ block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+ s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
break;
case IDE_DMA_WRITE:
- bdrv_acct_start(s->bs, &s->acct, s->nsector * BDRV_SECTOR_SIZE,
- BDRV_ACCT_WRITE);
+ block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+ s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
break;
default:
break;
}
- s->bus->dma->ops->start_dma(s->bus->dma, s, ide_dma_cb);
+ ide_start_dma(s, ide_dma_cb);
+}
+
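+/* Start DMA through the bus's DMA provider, if it implements start_dma. */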
+void ide_start_dma(IDEState *s, BlockDriverCompletionFunc *cb)
+{
+ if (s->bus->dma->ops->start_dma) {
+ s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
+ }
}
static void ide_sector_write_timer_cb(void *opaque)
IDEState *s = opaque;
int n;
- bdrv_acct_done(s->bs, &s->acct);
+ if (ret == -ECANCELED) {
+ return;
+ }
+ block_acct_done(bdrv_get_stats(s->bs), &s->acct);
s->pio_aiocb = NULL;
s->status &= ~BUSY_STAT;
if (ret != 0) {
- if (ide_handle_rw_error(s, -ret, BM_STATUS_PIO_RETRY)) {
+ if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
return;
}
}
that at the expense of slower write performances. Use this
option _only_ to install Windows 2000. You must disable it
for normal use. */
- qemu_mod_timer(s->sector_write_timer,
- qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() / 1000));
+ timer_mod(s->sector_write_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
} else {
ide_set_irq(s->bus);
}
n = s->req_nb_sectors;
}
+ if (!ide_sect_range_ok(s, sector_num, n)) {
+ ide_rw_error(s);
+ return;
+ }
+
s->iov.iov_base = s->io_buffer;
s->iov.iov_len = n * BDRV_SECTOR_SIZE;
qemu_iovec_init_external(&s->qiov, &s->iov, 1);
- bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
+ block_acct_start(bdrv_get_stats(s->bs), &s->acct,
+ n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
s->pio_aiocb = bdrv_aio_writev(s->bs, sector_num, &s->qiov, n,
ide_sector_write_cb, s);
}
{
IDEState *s = opaque;
+ s->pio_aiocb = NULL;
+
+ if (ret == -ECANCELED) {
+ return;
+ }
if (ret < 0) {
/* XXX: What sector number to set here? */
- if (ide_handle_rw_error(s, -ret, BM_STATUS_RETRY_FLUSH)) {
+ if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
return;
}
}
- bdrv_acct_done(s->bs, &s->acct);
+ if (s->bs) {
+ block_acct_done(bdrv_get_stats(s->bs), &s->acct);
+ }
s->status = READY_STAT | SEEK_STAT;
+ ide_cmd_done(s);
ide_set_irq(s->bus);
}
}
s->status |= BUSY_STAT;
- bdrv_acct_start(s->bs, &s->acct, 0, BDRV_ACCT_FLUSH);
- bdrv_aio_flush(s->bs, ide_flush_cb, s);
+ block_acct_start(bdrv_get_stats(s->bs), &s->acct, 0, BLOCK_ACCT_FLUSH);
+ s->pio_aiocb = bdrv_aio_flush(s->bs, ide_flush_cb, s);
}
static void ide_cfata_metadata_inquiry(IDEState *s)
s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
* devices to return a clear status register
* with READY_STAT *not* set. */
+ s->error = 0x01;
} else {
s->status = READY_STAT | SEEK_STAT;
/* The bits of the error register are not as usual for this command!
case 2: /* extended self test */
s->smart_selftest_count++;
if (s->smart_selftest_count > 21) {
- s->smart_selftest_count = 0;
+ s->smart_selftest_count = 1;
}
n = 2 + (s->smart_selftest_count - 1) * 24;
s->smart_selftest_data[n] = s->sector;
void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
IDEState *s;
+ bool complete;
#if defined(DEBUG_IDE)
printf("ide: CMD=%02x\n", val);
return;
if (!ide_cmd_permitted(s, val)) {
- goto abort_cmd;
+ ide_abort_command(s);
+ ide_set_irq(s->bus);
+ return;
}
- if (ide_cmd_table[val].handler != NULL) {
- bool complete;
-
- s->status = READY_STAT | BUSY_STAT;
- s->error = 0;
+ s->status = READY_STAT | BUSY_STAT;
+ s->error = 0;
- complete = ide_cmd_table[val].handler(s, val);
- if (complete) {
- s->status &= ~BUSY_STAT;
- assert(!!s->error == !!(s->status & ERR_STAT));
+ complete = ide_cmd_table[val].handler(s, val);
+ if (complete) {
+ s->status &= ~BUSY_STAT;
+ assert(!!s->error == !!(s->status & ERR_STAT));
- if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
- s->status |= SEEK_STAT;
- }
-
- ide_set_irq(s->bus);
+ if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
+ s->status |= SEEK_STAT;
}
- return;
- }
-
- switch(val) {
- default:
- /* should not be reachable */
- abort_cmd:
- ide_abort_command(s);
+ ide_cmd_done(s);
ide_set_irq(s->bus);
- break;
}
}
}
/* reset dma provider too */
- bus->dma->ops->reset(bus->dma);
+ if (bus->dma->ops->reset) {
+ bus->dma->ops->reset(bus->dma);
+ }
}
static bool ide_cd_is_tray_open(void *opaque)
return ((IDEState *)opaque)->tray_locked;
}
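+/* Block-layer resize callback: pick up the new device size and refresh the
+ * cached IDENTIFY data so the guest sees the updated capacity. */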
+static void ide_resize_cb(void *opaque)
+{
+ IDEState *s = opaque;
+ uint64_t nb_sectors;
+
+ if (!s->identify_set) {
+ return;
+ }
+
+ bdrv_get_geometry(s->bs, &nb_sectors);
+ s->nb_sectors = nb_sectors;
+
+ /* Update the identify data buffer. */
+ if (s->drive_kind == IDE_CFATA) {
+ ide_cfata_identify_size(s);
+ } else {
+ /* IDE_CD uses a different set of callbacks entirely. */
+ assert(s->drive_kind != IDE_CD);
+ ide_identify_size(s);
+ }
+}
+
static const BlockDevOps ide_cd_block_ops = {
.change_media_cb = ide_cd_change_cb,
.eject_request_cb = ide_cd_eject_request_cb,
.is_medium_locked = ide_cd_is_medium_locked,
};
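+/* Hard disk and CFATA drives only need to be notified of host-side resize. */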
+static const BlockDevOps ide_hd_block_ops = {
+ .resize_cb = ide_resize_cb,
+};
+
int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind,
const char *version, const char *serial, const char *model,
uint64_t wwn,
s->smart_selftest_count = 0;
if (kind == IDE_CD) {
bdrv_set_dev_ops(bs, &ide_cd_block_ops, s);
- bdrv_set_buffer_alignment(bs, 2048);
+ bdrv_set_guest_block_size(bs, 2048);
} else {
if (!bdrv_is_inserted(s->bs)) {
error_report("Device needs media, but drive is empty");
error_report("Can't use a read-only drive");
return -1;
}
+ bdrv_set_dev_ops(bs, &ide_hd_block_ops, s);
}
if (serial) {
pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
s->smart_selftest_data = qemu_blockalign(s->bs, 512);
memset(s->smart_selftest_data, 0, 512);
- s->sector_write_timer = qemu_new_timer_ns(vm_clock,
+ s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
ide_sector_write_timer_cb, s);
}
-static void ide_nop_start(IDEDMA *dma, IDEState *s,
- BlockDriverCompletionFunc *cb)
-{
-}
-
-static int ide_nop(IDEDMA *dma)
-{
- return 0;
-}
-
static int ide_nop_int(IDEDMA *dma, int x)
{
return 0;
}
static const IDEDMAOps ide_dma_nop_ops = {
- .start_dma = ide_nop_start,
- .start_transfer = ide_nop,
.prepare_buf = ide_nop_int,
.rw_buf = ide_nop_int,
.set_unit = ide_nop_int,
- .add_status = ide_nop_int,
- .set_inactive = ide_nop,
.restart_cb = ide_nop_restart,
- .reset = ide_nop,
};
static IDEDMA ide_dma_nop = {
bus->dma = &ide_dma_nop;
}
-/* TODO convert users to qdev and remove */
-void ide_init2_with_non_qdev_drives(IDEBus *bus, DriveInfo *hd0,
- DriveInfo *hd1, qemu_irq irq)
-{
- int i, trans;
- DriveInfo *dinfo;
- uint32_t cyls, heads, secs;
-
- for(i = 0; i < 2; i++) {
- dinfo = i == 0 ? hd0 : hd1;
- ide_init1(bus, i);
- if (dinfo) {
- cyls = dinfo->cyls;
- heads = dinfo->heads;
- secs = dinfo->secs;
- trans = dinfo->trans;
- if (!cyls && !heads && !secs) {
- hd_geometry_guess(dinfo->bdrv, &cyls, &heads, &secs, &trans);
- } else if (trans == BIOS_ATA_TRANSLATION_AUTO) {
- trans = hd_bios_chs_auto_trans(cyls, heads, secs);
- }
- if (cyls < 1 || cyls > 65535) {
- error_report("cyls must be between 1 and 65535");
- exit(1);
- }
- if (heads < 1 || heads > 16) {
- error_report("heads must be between 1 and 16");
- exit(1);
- }
- if (secs < 1 || secs > 255) {
- error_report("secs must be between 1 and 255");
- exit(1);
- }
- if (ide_init_drive(&bus->ifs[i], dinfo->bdrv,
- dinfo->media_cd ? IDE_CD : IDE_HD,
- NULL, dinfo->serial, NULL, 0,
- cyls, heads, secs, trans) < 0) {
- error_report("Can't set up IDE drive %s", dinfo->id);
- exit(1);
- }
- bdrv_attach_dev_nofail(dinfo->bdrv, &bus->ifs[i]);
- } else {
- ide_reset(&bus->ifs[i]);
- }
- }
- bus->irq = irq;
- bus->dma = &ide_dma_nop;
-}
-
static const MemoryRegionPortio ide_portio_list[] = {
{ 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
{ 0, 2, 2, .read = ide_data_readw, .write = ide_data_writew },
IDEState *s = opaque;
return ((s->status & DRQ_STAT) != 0)
- || (s->bus->error_status & BM_STATUS_PIO_RETRY);
+ || (s->bus->error_status & IDE_RETRY_PIO);
}
static bool ide_tray_state_needed(void *opaque)
.name ="ide_drive/atapi/gesn_state",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_BOOL(events.new_media, IDEState),
VMSTATE_BOOL(events.eject_request, IDEState),
VMSTATE_END_OF_LIST()
.name = "ide_drive/tray_state",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.fields = (VMStateField[]) {
VMSTATE_BOOL(tray_open, IDEState),
VMSTATE_BOOL(tray_locked, IDEState),
.name = "ide_drive/pio_state",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
.pre_save = ide_drive_pio_pre_save,
.post_load = ide_drive_pio_post_load,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_INT32(req_nb_sectors, IDEState),
VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
vmstate_info_uint8, uint8_t),
.name = "ide_drive",
.version_id = 3,
.minimum_version_id = 0,
- .minimum_version_id_old = 0,
.post_load = ide_drive_post_load,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_INT32(mult_sectors, IDEState),
VMSTATE_INT32(identify_set, IDEState),
VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
.name ="ide_bus/error",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_INT32(error_status, IDEBus),
VMSTATE_END_OF_LIST()
}
.name = "ide_bus",
.version_id = 1,
.minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
+ .fields = (VMStateField[]) {
VMSTATE_UINT8(cmd, IDEBus),
VMSTATE_UINT8(unit, IDEBus),
VMSTATE_END_OF_LIST()